From 9bdcd2a495d4189d6536d43f1294dffb38daa9a5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 11 Jul 2018 15:58:48 -0400 Subject: add std.event.Future This is like a promise, but it's for multiple getters, and uses an event loop. --- std/event/future.zig | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 std/event/future.zig (limited to 'std/event/future.zig') diff --git a/std/event/future.zig b/std/event/future.zig new file mode 100644 index 0000000000..8001f675a2 --- /dev/null +++ b/std/event/future.zig @@ -0,0 +1,87 @@ +const std = @import("../index.zig"); +const assert = std.debug.assert; +const builtin = @import("builtin"); +const AtomicRmwOp = builtin.AtomicRmwOp; +const AtomicOrder = builtin.AtomicOrder; +const Lock = std.event.Lock; +const Loop = std.event.Loop; + +/// This is a value that starts out unavailable, until a value is put(). +/// While it is unavailable, coroutines suspend when they try to get() it, +/// and then are resumed when the value is put(). +/// At this point the value remains forever available, and another put() is not allowed. +pub fn Future(comptime T: type) type { + return struct { + lock: Lock, + data: T, + available: u8, // TODO make this a bool + + const Self = this; + const Queue = std.atomic.QueueMpsc(promise); + + pub fn init(loop: *Loop) Self { + return Self{ + .lock = Lock.initLocked(loop), + .available = 0, + .data = undefined, + }; + } + + /// Obtain the value. If it's not available, wait until it becomes + /// available. + /// Thread-safe. + pub async fn get(self: *Self) T { + if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1) { + return self.data; + } + const held = await (async self.lock.acquire() catch unreachable); + defer held.release(); + + return self.data; + } + + /// Make the data become available. May be called only once. 
+ pub fn put(self: *Self, value: T) void { + self.data = value; + const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + assert(prev == 0); // put() called twice + Lock.Held.release(Lock.Held{ .lock = &self.lock }); + } + }; +} + +test "std.event.Future" { + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + + const allocator = &da.allocator; + + var loop: Loop = undefined; + try loop.initMultiThreaded(allocator); + defer loop.deinit(); + + const handle = try async testFuture(&loop); + defer cancel handle; + + loop.run(); +} + +async fn testFuture(loop: *Loop) void { + var future = Future(i32).init(loop); + + const a = async waitOnFuture(&future) catch @panic("memory"); + const b = async waitOnFuture(&future) catch @panic("memory"); + const c = async resolveFuture(&future) catch @panic("memory"); + + const result = (await a) + (await b); + cancel c; + assert(result == 12); +} + +async fn waitOnFuture(future: *Future(i32)) i32 { + return await (async future.get() catch @panic("memory")); +} + +async fn resolveFuture(future: *Future(i32)) void { + future.put(6); +} -- cgit v1.2.3 From 9751a0ae045110fb615c866b94ad47680b9c48c7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 11 Jul 2018 19:38:01 -0400 Subject: std.atomic: use spinlocks the lock-free data structures all had ABA problems and std.atomic.Stack had a possibility to load an unmapped memory address. 
--- CMakeLists.txt | 3 +- build.zig | 8 +- std/atomic/index.zig | 6 +- std/atomic/queue.zig | 226 ++++++++++++++++++++++++++++++++++++++++++++++ std/atomic/queue_mpmc.zig | 214 ------------------------------------------- std/atomic/queue_mpsc.zig | 185 ------------------------------------- std/atomic/stack.zig | 32 ++++--- std/event/channel.zig | 12 +-- std/event/future.zig | 21 +++-- std/event/lock.zig | 2 +- std/event/loop.zig | 6 +- test/tests.zig | 26 +++--- 12 files changed, 286 insertions(+), 455 deletions(-) create mode 100644 std/atomic/queue.zig delete mode 100644 std/atomic/queue_mpmc.zig delete mode 100644 std/atomic/queue_mpsc.zig (limited to 'std/event/future.zig') diff --git a/CMakeLists.txt b/CMakeLists.txt index 51d348f042..e606855555 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -432,8 +432,7 @@ set(ZIG_STD_FILES "array_list.zig" "atomic/index.zig" "atomic/int.zig" - "atomic/queue_mpmc.zig" - "atomic/queue_mpsc.zig" + "atomic/queue.zig" "atomic/stack.zig" "base64.zig" "buf_map.zig" diff --git a/build.zig b/build.zig index fd37138f33..c9e70887e3 100644 --- a/build.zig +++ b/build.zig @@ -91,11 +91,11 @@ pub fn build(b: *Builder) !void { test_step.dependOn(tests.addPkgTests(b, test_filter, "std/special/compiler_rt/index.zig", "compiler-rt", "Run the compiler_rt tests", modes)); - test_step.dependOn(tests.addCompareOutputTests(b, test_filter)); + test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes)); test_step.dependOn(tests.addBuildExampleTests(b, test_filter)); - test_step.dependOn(tests.addCompileErrorTests(b, test_filter)); - test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter)); - test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter)); + test_step.dependOn(tests.addCompileErrorTests(b, test_filter, modes)); + test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes)); + test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes)); test_step.dependOn(tests.addTranslateCTests(b, 
test_filter)); test_step.dependOn(tests.addGenHTests(b, test_filter)); test_step.dependOn(docs_step); diff --git a/std/atomic/index.zig b/std/atomic/index.zig index cf344a8231..a94cff1973 100644 --- a/std/atomic/index.zig +++ b/std/atomic/index.zig @@ -1,11 +1,9 @@ pub const Stack = @import("stack.zig").Stack; -pub const QueueMpsc = @import("queue_mpsc.zig").QueueMpsc; -pub const QueueMpmc = @import("queue_mpmc.zig").QueueMpmc; +pub const Queue = @import("queue.zig").Queue; pub const Int = @import("int.zig").Int; test "std.atomic" { _ = @import("stack.zig"); - _ = @import("queue_mpsc.zig"); - _ = @import("queue_mpmc.zig"); + _ = @import("queue.zig"); _ = @import("int.zig"); } diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig new file mode 100644 index 0000000000..1fd07714e8 --- /dev/null +++ b/std/atomic/queue.zig @@ -0,0 +1,226 @@ +const builtin = @import("builtin"); +const AtomicOrder = builtin.AtomicOrder; +const AtomicRmwOp = builtin.AtomicRmwOp; + +/// Many producer, many consumer, non-allocating, thread-safe. +/// Uses a spinlock to protect get() and put(). 
+pub fn Queue(comptime T: type) type { + return struct { + head: ?*Node, + tail: ?*Node, + lock: u8, + + pub const Self = this; + + pub const Node = struct { + next: ?*Node, + data: T, + }; + + pub fn init() Self { + return Self{ + .head = null, + .tail = null, + .lock = 0, + }; + } + + pub fn put(self: *Self, node: *Node) void { + node.next = null; + + while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} + defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); + + const opt_tail = self.tail; + self.tail = node; + if (opt_tail) |tail| { + tail.next = node; + } else { + assert(self.head == null); + self.head = node; + } + } + + pub fn get(self: *Self) ?*Node { + while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} + defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); + + const head = self.head orelse return null; + self.head = head.next; + if (head.next == null) self.tail = null; + return head; + } + + pub fn isEmpty(self: *Self) bool { + return @atomicLoad(?*Node, &self.head, builtin.AtomicOrder.SeqCst) != null; + } + + pub fn dump(self: *Self) void { + while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} + defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); + + std.debug.warn("head: "); + dumpRecursive(self.head, 0); + std.debug.warn("tail: "); + dumpRecursive(self.tail, 0); + } + + fn dumpRecursive(optional_node: ?*Node, indent: usize) void { + var stderr_file = std.io.getStdErr() catch return; + const stderr = &std.io.FileOutStream.init(&stderr_file).stream; + stderr.writeByteNTimes(' ', indent) catch return; + if (optional_node) |node| { + std.debug.warn("0x{x}={}\n", @ptrToInt(node), node.data); + dumpRecursive(node.next, indent + 1); + } else { + std.debug.warn("(null)\n"); + } + } + }; +} + +const std = 
@import("../index.zig"); +const assert = std.debug.assert; + +const Context = struct { + allocator: *std.mem.Allocator, + queue: *Queue(i32), + put_sum: isize, + get_sum: isize, + get_count: usize, + puts_done: u8, // TODO make this a bool +}; + +// TODO add lazy evaluated build options and then put puts_per_thread behind +// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor +// CI we would use a less aggressive setting since at 1 core, while we still +// want this test to pass, we need a smaller value since there is so much thrashing +// we would also use a less aggressive setting when running in valgrind +const puts_per_thread = 500; +const put_thread_count = 3; + +test "std.atomic.Queue" { + var direct_allocator = std.heap.DirectAllocator.init(); + defer direct_allocator.deinit(); + + var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024); + defer direct_allocator.allocator.free(plenty_of_memory); + + var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); + var a = &fixed_buffer_allocator.allocator; + + var queue = Queue(i32).init(); + var context = Context{ + .allocator = a, + .queue = &queue, + .put_sum = 0, + .get_sum = 0, + .puts_done = 0, + .get_count = 0, + }; + + var putters: [put_thread_count]*std.os.Thread = undefined; + for (putters) |*t| { + t.* = try std.os.spawnThread(&context, startPuts); + } + var getters: [put_thread_count]*std.os.Thread = undefined; + for (getters) |*t| { + t.* = try std.os.spawnThread(&context, startGets); + } + + for (putters) |t| + t.wait(); + _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + for (getters) |t| + t.wait(); + + if (context.put_sum != context.get_sum) { + std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum); + } + + if (context.get_count != puts_per_thread * put_thread_count) { + std.debug.panic( + "failure\nget_count:{} != puts_per_thread:{} * 
put_thread_count:{}", + context.get_count, + u32(puts_per_thread), + u32(put_thread_count), + ); + } +} + +fn startPuts(ctx: *Context) u8 { + var put_count: usize = puts_per_thread; + var r = std.rand.DefaultPrng.init(0xdeadbeef); + while (put_count != 0) : (put_count -= 1) { + std.os.time.sleep(0, 1); // let the os scheduler be our fuzz + const x = @bitCast(i32, r.random.scalar(u32)); + const node = ctx.allocator.create(Queue(i32).Node{ + .next = undefined, + .data = x, + }) catch unreachable; + ctx.queue.put(node); + _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst); + } + return 0; +} + +fn startGets(ctx: *Context) u8 { + while (true) { + const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1; + + while (ctx.queue.get()) |node| { + std.os.time.sleep(0, 1); // let the os scheduler be our fuzz + _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst); + } + + if (last) return 0; + } +} + +test "std.atomic.Queue single-threaded" { + var queue = Queue(i32).init(); + + var node_0 = Queue(i32).Node{ + .data = 0, + .next = undefined, + }; + queue.put(&node_0); + + var node_1 = Queue(i32).Node{ + .data = 1, + .next = undefined, + }; + queue.put(&node_1); + + assert(queue.get().?.data == 0); + + var node_2 = Queue(i32).Node{ + .data = 2, + .next = undefined, + }; + queue.put(&node_2); + + var node_3 = Queue(i32).Node{ + .data = 3, + .next = undefined, + }; + queue.put(&node_3); + + assert(queue.get().?.data == 1); + + assert(queue.get().?.data == 2); + + var node_4 = Queue(i32).Node{ + .data = 4, + .next = undefined, + }; + queue.put(&node_4); + + assert(queue.get().?.data == 3); + node_3.next = null; + + assert(queue.get().?.data == 4); + + assert(queue.get() == null); +} diff --git a/std/atomic/queue_mpmc.zig b/std/atomic/queue_mpmc.zig deleted file mode 100644 index 
7ffc9f9ccb..0000000000 --- a/std/atomic/queue_mpmc.zig +++ /dev/null @@ -1,214 +0,0 @@ -const builtin = @import("builtin"); -const AtomicOrder = builtin.AtomicOrder; -const AtomicRmwOp = builtin.AtomicRmwOp; - -/// Many producer, many consumer, non-allocating, thread-safe, lock-free -/// This implementation has a crippling limitation - it hangs onto node -/// memory for 1 extra get() and 1 extra put() operation - when get() returns a node, that -/// node must not be freed until both the next get() and the next put() completes. -pub fn QueueMpmc(comptime T: type) type { - return struct { - head: *Node, - tail: *Node, - root: Node, - - pub const Self = this; - - pub const Node = struct { - next: ?*Node, - data: T, - }; - - /// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287 - pub fn init(self: *Self) void { - self.root.next = null; - self.head = &self.root; - self.tail = &self.root; - } - - pub fn put(self: *Self, node: *Node) void { - node.next = null; - - const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst); - _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst); - } - - /// node must not be freed until both the next get() and the next put() complete - pub fn get(self: *Self) ?*Node { - var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst); - while (true) { - const node = head.next orelse return null; - head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node; - } - } - - ///// This is a debug function that is not thread-safe. 
- pub fn dump(self: *Self) void { - std.debug.warn("head: "); - dumpRecursive(self.head, 0); - std.debug.warn("tail: "); - dumpRecursive(self.tail, 0); - } - - fn dumpRecursive(optional_node: ?*Node, indent: usize) void { - var stderr_file = std.io.getStdErr() catch return; - const stderr = &std.io.FileOutStream.init(&stderr_file).stream; - stderr.writeByteNTimes(' ', indent) catch return; - if (optional_node) |node| { - std.debug.warn("0x{x}={}\n", @ptrToInt(node), node.data); - dumpRecursive(node.next, indent + 1); - } else { - std.debug.warn("(null)\n"); - } - } - }; -} - -const std = @import("std"); -const assert = std.debug.assert; - -const Context = struct { - allocator: *std.mem.Allocator, - queue: *QueueMpmc(i32), - put_sum: isize, - get_sum: isize, - get_count: usize, - puts_done: u8, // TODO make this a bool -}; - -// TODO add lazy evaluated build options and then put puts_per_thread behind -// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor -// CI we would use a less aggressive setting since at 1 core, while we still -// want this test to pass, we need a smaller value since there is so much thrashing -// we would also use a less aggressive setting when running in valgrind -const puts_per_thread = 500; -const put_thread_count = 3; - -test "std.atomic.queue_mpmc" { - var direct_allocator = std.heap.DirectAllocator.init(); - defer direct_allocator.deinit(); - - var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024); - defer direct_allocator.allocator.free(plenty_of_memory); - - var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); - var a = &fixed_buffer_allocator.allocator; - - var queue: QueueMpmc(i32) = undefined; - queue.init(); - var context = Context{ - .allocator = a, - .queue = &queue, - .put_sum = 0, - .get_sum = 0, - .puts_done = 0, - .get_count = 0, - }; - - var putters: [put_thread_count]*std.os.Thread = undefined; - for (putters) |*t| { - t.* = try 
std.os.spawnThread(&context, startPuts); - } - var getters: [put_thread_count]*std.os.Thread = undefined; - for (getters) |*t| { - t.* = try std.os.spawnThread(&context, startGets); - } - - for (putters) |t| - t.wait(); - _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - for (getters) |t| - t.wait(); - - if (context.put_sum != context.get_sum) { - std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum); - } - - if (context.get_count != puts_per_thread * put_thread_count) { - std.debug.panic( - "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}", - context.get_count, - u32(puts_per_thread), - u32(put_thread_count), - ); - } -} - -fn startPuts(ctx: *Context) u8 { - var put_count: usize = puts_per_thread; - var r = std.rand.DefaultPrng.init(0xdeadbeef); - while (put_count != 0) : (put_count -= 1) { - std.os.time.sleep(0, 1); // let the os scheduler be our fuzz - const x = @bitCast(i32, r.random.scalar(u32)); - const node = ctx.allocator.create(QueueMpmc(i32).Node{ - .next = undefined, - .data = x, - }) catch unreachable; - ctx.queue.put(node); - _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst); - } - return 0; -} - -fn startGets(ctx: *Context) u8 { - while (true) { - const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1; - - while (ctx.queue.get()) |node| { - std.os.time.sleep(0, 1); // let the os scheduler be our fuzz - _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst); - _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst); - } - - if (last) return 0; - } -} - -test "std.atomic.queue_mpmc single-threaded" { - var queue: QueueMpmc(i32) = undefined; - queue.init(); - - var node_0 = QueueMpmc(i32).Node{ - .data = 0, - .next = undefined, - }; - queue.put(&node_0); - - var node_1 = QueueMpmc(i32).Node{ - .data = 1, - .next = undefined, - 
}; - queue.put(&node_1); - - assert(queue.get().?.data == 0); - - var node_2 = QueueMpmc(i32).Node{ - .data = 2, - .next = undefined, - }; - queue.put(&node_2); - - var node_3 = QueueMpmc(i32).Node{ - .data = 3, - .next = undefined, - }; - queue.put(&node_3); - - assert(queue.get().?.data == 1); - - assert(queue.get().?.data == 2); - - var node_4 = QueueMpmc(i32).Node{ - .data = 4, - .next = undefined, - }; - queue.put(&node_4); - - assert(queue.get().?.data == 3); - // if we were to set node_3.next to null here, it would cause this test - // to fail. this demonstrates the limitation of hanging on to extra memory. - - assert(queue.get().?.data == 4); - - assert(queue.get() == null); -} diff --git a/std/atomic/queue_mpsc.zig b/std/atomic/queue_mpsc.zig deleted file mode 100644 index 978e189453..0000000000 --- a/std/atomic/queue_mpsc.zig +++ /dev/null @@ -1,185 +0,0 @@ -const std = @import("../index.zig"); -const assert = std.debug.assert; -const builtin = @import("builtin"); -const AtomicOrder = builtin.AtomicOrder; -const AtomicRmwOp = builtin.AtomicRmwOp; - -/// Many producer, single consumer, non-allocating, thread-safe, lock-free -pub fn QueueMpsc(comptime T: type) type { - return struct { - inboxes: [2]std.atomic.Stack(T), - outbox: std.atomic.Stack(T), - inbox_index: usize, - - pub const Self = this; - - pub const Node = std.atomic.Stack(T).Node; - - /// Not thread-safe. The call to init() must complete before any other functions are called. - /// No deinitialization required. - pub fn init() Self { - return Self{ - .inboxes = []std.atomic.Stack(T){ - std.atomic.Stack(T).init(), - std.atomic.Stack(T).init(), - }, - .outbox = std.atomic.Stack(T).init(), - .inbox_index = 0, - }; - } - - /// Fully thread-safe. put() may be called from any thread at any time. 
- pub fn put(self: *Self, node: *Node) void { - const inbox_index = @atomicLoad(usize, &self.inbox_index, AtomicOrder.SeqCst); - const inbox = &self.inboxes[inbox_index]; - inbox.push(node); - } - - /// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before - /// the next call to get(). - pub fn get(self: *Self) ?*Node { - if (self.outbox.pop()) |node| { - return node; - } - const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst); - const prev_inbox = &self.inboxes[prev_inbox_index]; - while (prev_inbox.pop()) |node| { - self.outbox.push(node); - } - return self.outbox.pop(); - } - - /// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before - /// the next call to isEmpty(). - pub fn isEmpty(self: *Self) bool { - if (!self.outbox.isEmpty()) return false; - const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst); - const prev_inbox = &self.inboxes[prev_inbox_index]; - while (prev_inbox.pop()) |node| { - self.outbox.push(node); - } - return self.outbox.isEmpty(); - } - - /// For debugging only. No API guarantees about what this does. 
- pub fn dump(self: *Self) void { - { - var it = self.outbox.root; - while (it) |node| { - std.debug.warn("0x{x} -> ", @ptrToInt(node)); - it = node.next; - } - } - const inbox_index = self.inbox_index; - const inboxes = []*std.atomic.Stack(T){ - &self.inboxes[self.inbox_index], - &self.inboxes[1 - self.inbox_index], - }; - for (inboxes) |inbox| { - var it = inbox.root; - while (it) |node| { - std.debug.warn("0x{x} -> ", @ptrToInt(node)); - it = node.next; - } - } - - std.debug.warn("null\n"); - } - }; -} - -const Context = struct { - allocator: *std.mem.Allocator, - queue: *QueueMpsc(i32), - put_sum: isize, - get_sum: isize, - get_count: usize, - puts_done: u8, // TODO make this a bool -}; - -// TODO add lazy evaluated build options and then put puts_per_thread behind -// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor -// CI we would use a less aggressive setting since at 1 core, while we still -// want this test to pass, we need a smaller value since there is so much thrashing -// we would also use a less aggressive setting when running in valgrind -const puts_per_thread = 500; -const put_thread_count = 3; - -test "std.atomic.queue_mpsc" { - var direct_allocator = std.heap.DirectAllocator.init(); - defer direct_allocator.deinit(); - - var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024); - defer direct_allocator.allocator.free(plenty_of_memory); - - var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); - var a = &fixed_buffer_allocator.allocator; - - var queue = QueueMpsc(i32).init(); - var context = Context{ - .allocator = a, - .queue = &queue, - .put_sum = 0, - .get_sum = 0, - .puts_done = 0, - .get_count = 0, - }; - - var putters: [put_thread_count]*std.os.Thread = undefined; - for (putters) |*t| { - t.* = try std.os.spawnThread(&context, startPuts); - } - var getters: [1]*std.os.Thread = undefined; - for (getters) |*t| { - t.* = try std.os.spawnThread(&context, startGets); 
- } - - for (putters) |t| - t.wait(); - _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - for (getters) |t| - t.wait(); - - if (context.put_sum != context.get_sum) { - std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum); - } - - if (context.get_count != puts_per_thread * put_thread_count) { - std.debug.panic( - "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}", - context.get_count, - u32(puts_per_thread), - u32(put_thread_count), - ); - } -} - -fn startPuts(ctx: *Context) u8 { - var put_count: usize = puts_per_thread; - var r = std.rand.DefaultPrng.init(0xdeadbeef); - while (put_count != 0) : (put_count -= 1) { - std.os.time.sleep(0, 1); // let the os scheduler be our fuzz - const x = @bitCast(i32, r.random.scalar(u32)); - const node = ctx.allocator.create(QueueMpsc(i32).Node{ - .next = undefined, - .data = x, - }) catch unreachable; - ctx.queue.put(node); - _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst); - } - return 0; -} - -fn startGets(ctx: *Context) u8 { - while (true) { - const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1; - - while (ctx.queue.get()) |node| { - std.os.time.sleep(0, 1); // let the os scheduler be our fuzz - _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst); - _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst); - } - - if (last) return 0; - } -} diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig index d74bee8e8b..16d5c9503b 100644 --- a/std/atomic/stack.zig +++ b/std/atomic/stack.zig @@ -1,10 +1,13 @@ +const assert = std.debug.assert; const builtin = @import("builtin"); const AtomicOrder = builtin.AtomicOrder; -/// Many reader, many writer, non-allocating, thread-safe, lock-free +/// Many reader, many writer, non-allocating, thread-safe +/// Uses a spinlock to protect push() and pop() pub fn 
Stack(comptime T: type) type { return struct { root: ?*Node, + lock: u8, pub const Self = this; @@ -14,7 +17,10 @@ pub fn Stack(comptime T: type) type { }; pub fn init() Self { - return Self{ .root = null }; + return Self{ + .root = null, + .lock = 0, + }; } /// push operation, but only if you are the first item in the stack. if you did not succeed in @@ -25,18 +31,20 @@ pub fn Stack(comptime T: type) type { } pub fn push(self: *Self, node: *Node) void { - var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst); - while (true) { - node.next = root; - root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse break; - } + while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} + defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); + + node.next = self.root; + self.root = node; } pub fn pop(self: *Self) ?*Node { - var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst); - while (true) { - root = @cmpxchgWeak(?*Node, &self.root, root, (root orelse return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return root; - } + while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} + defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); + + const root = self.root orelse return null; + self.root = root.next; + return root; } pub fn isEmpty(self: *Self) bool { @@ -45,7 +53,7 @@ pub fn Stack(comptime T: type) type { }; } -const std = @import("std"); +const std = @import("../index.zig"); const Context = struct { allocator: *std.mem.Allocator, stack: *Stack(i32), diff --git a/std/event/channel.zig b/std/event/channel.zig index 4b3a7177a2..d4d713bdee 100644 --- a/std/event/channel.zig +++ b/std/event/channel.zig @@ -12,8 +12,8 @@ pub fn Channel(comptime T: type) type { return struct { loop: *Loop, - getters: std.atomic.QueueMpsc(GetNode), - putters: 
std.atomic.QueueMpsc(PutNode), + getters: std.atomic.Queue(GetNode), + putters: std.atomic.Queue(PutNode), get_count: usize, put_count: usize, dispatch_lock: u8, // TODO make this a bool @@ -46,8 +46,8 @@ pub fn Channel(comptime T: type) type { .buffer_index = 0, .dispatch_lock = 0, .need_dispatch = 0, - .getters = std.atomic.QueueMpsc(GetNode).init(), - .putters = std.atomic.QueueMpsc(PutNode).init(), + .getters = std.atomic.Queue(GetNode).init(), + .putters = std.atomic.Queue(PutNode).init(), .get_count = 0, .put_count = 0, }); @@ -81,7 +81,7 @@ pub fn Channel(comptime T: type) type { .next = undefined, .data = handle, }; - var queue_node = std.atomic.QueueMpsc(PutNode).Node{ + var queue_node = std.atomic.Queue(PutNode).Node{ .data = PutNode{ .tick_node = &my_tick_node, .data = data, @@ -111,7 +111,7 @@ pub fn Channel(comptime T: type) type { .next = undefined, .data = handle, }; - var queue_node = std.atomic.QueueMpsc(GetNode).Node{ + var queue_node = std.atomic.Queue(GetNode).Node{ .data = GetNode{ .ptr = &result, .tick_node = &my_tick_node, diff --git a/std/event/future.zig b/std/event/future.zig index 8001f675a2..6c03641828 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -17,7 +17,7 @@ pub fn Future(comptime T: type) type { available: u8, // TODO make this a bool const Self = this; - const Queue = std.atomic.QueueMpsc(promise); + const Queue = std.atomic.Queue(promise); pub fn init(loop: *Loop) Self { return Self{ @@ -30,19 +30,19 @@ pub fn Future(comptime T: type) type { /// Obtain the value. If it's not available, wait until it becomes /// available. /// Thread-safe. - pub async fn get(self: *Self) T { + pub async fn get(self: *Self) *T { if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1) { - return self.data; + return &self.data; } const held = await (async self.lock.acquire() catch unreachable); - defer held.release(); + held.release(); - return self.data; + return &self.data; } /// Make the data become available. 
May be called only once. - pub fn put(self: *Self, value: T) void { - self.data = value; + /// Before calling this, modify the `data` property. + pub fn resolve(self: *Self) void { const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); assert(prev == 0); // put() called twice Lock.Held.release(Lock.Held{ .lock = &self.lock }); @@ -57,7 +57,7 @@ test "std.event.Future" { const allocator = &da.allocator; var loop: Loop = undefined; - try loop.initMultiThreaded(allocator); + try loop.initSingleThreaded(allocator); defer loop.deinit(); const handle = try async testFuture(&loop); @@ -79,9 +79,10 @@ async fn testFuture(loop: *Loop) void { } async fn waitOnFuture(future: *Future(i32)) i32 { - return await (async future.get() catch @panic("memory")); + return (await (async future.get() catch @panic("memory"))).*; } async fn resolveFuture(future: *Future(i32)) void { - future.put(6); + future.data = 6; + future.resolve(); } diff --git a/std/event/lock.zig b/std/event/lock.zig index cba3594b50..2013b5595f 100644 --- a/std/event/lock.zig +++ b/std/event/lock.zig @@ -15,7 +15,7 @@ pub const Lock = struct { queue: Queue, queue_empty_bit: u8, // TODO make this a bool - const Queue = std.atomic.QueueMpsc(promise); + const Queue = std.atomic.Queue(promise); pub const Held = struct { lock: *Lock, diff --git a/std/event/loop.zig b/std/event/loop.zig index 646f15875f..07575cf2e8 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -9,7 +9,7 @@ const AtomicOrder = builtin.AtomicOrder; pub const Loop = struct { allocator: *mem.Allocator, - next_tick_queue: std.atomic.QueueMpsc(promise), + next_tick_queue: std.atomic.Queue(promise), os_data: OsData, final_resume_node: ResumeNode, dispatch_lock: u8, // TODO make this a bool @@ -21,7 +21,7 @@ pub const Loop = struct { available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd), eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node, - pub const NextTickNode = 
std.atomic.QueueMpsc(promise).Node; + pub const NextTickNode = std.atomic.Queue(promise).Node; pub const ResumeNode = struct { id: Id, @@ -77,7 +77,7 @@ pub const Loop = struct { .pending_event_count = 0, .allocator = allocator, .os_data = undefined, - .next_tick_queue = std.atomic.QueueMpsc(promise).init(), + .next_tick_queue = std.atomic.Queue(promise).init(), .dispatch_lock = 1, // start locked so threads go directly into epoll wait .extra_threads = undefined, .available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(), diff --git a/test/tests.zig b/test/tests.zig index b1453776a8..3a72f58753 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -47,12 +47,13 @@ const test_targets = []TestTarget{ const max_stdout_size = 1 * 1024 * 1024; // 1 MB -pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { +pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step { const cases = b.allocator.create(CompareOutputContext{ .b = b, .step = b.step("test-compare-output", "Run the compare output tests"), .test_index = 0, .test_filter = test_filter, + .modes = modes, }) catch unreachable; compare_output.addCases(cases); @@ -60,12 +61,13 @@ pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build return cases.step; } -pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { +pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step { const cases = b.allocator.create(CompareOutputContext{ .b = b, .step = b.step("test-runtime-safety", "Run the runtime safety tests"), .test_index = 0, .test_filter = test_filter, + .modes = modes, }) catch unreachable; runtime_safety.addCases(cases); @@ -73,12 +75,13 @@ pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build return cases.step; } -pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) 
*build.Step { +pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step { const cases = b.allocator.create(CompileErrorContext{ .b = b, .step = b.step("test-compile-errors", "Run the compile error tests"), .test_index = 0, .test_filter = test_filter, + .modes = modes, }) catch unreachable; compile_errors.addCases(cases); @@ -99,12 +102,13 @@ pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build. return cases.step; } -pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { +pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step { const cases = b.allocator.create(CompareOutputContext{ .b = b, .step = b.step("test-asm-link", "Run the assemble and link tests"), .test_index = 0, .test_filter = test_filter, + .modes = modes, }) catch unreachable; assemble_and_link.addCases(cases); @@ -173,6 +177,7 @@ pub const CompareOutputContext = struct { step: *build.Step, test_index: usize, test_filter: ?[]const u8, + modes: []const Mode, const Special = enum { None, @@ -423,12 +428,7 @@ pub const CompareOutputContext = struct { self.step.dependOn(&run_and_cmp_output.step); }, Special.None => { - for ([]Mode{ - Mode.Debug, - Mode.ReleaseSafe, - Mode.ReleaseFast, - Mode.ReleaseSmall, - }) |mode| { + for (self.modes) |mode| { const annotated_case_name = fmt.allocPrint(self.b.allocator, "{} {} ({})", "compare-output", case.name, @tagName(mode)) catch unreachable; if (self.test_filter) |filter| { if (mem.indexOf(u8, annotated_case_name, filter) == null) continue; @@ -483,6 +483,7 @@ pub const CompileErrorContext = struct { step: *build.Step, test_index: usize, test_filter: ?[]const u8, + modes: []const Mode, const TestCase = struct { name: []const u8, @@ -673,10 +674,7 @@ pub const CompileErrorContext = struct { pub fn addCase(self: *CompileErrorContext, case: *const TestCase) void { const b = self.b; - for ([]Mode{ - 
Mode.Debug, - Mode.ReleaseFast, - }) |mode| { + for (self.modes) |mode| { const annotated_case_name = fmt.allocPrint(self.b.allocator, "compile-error {} ({})", case.name, @tagName(mode)) catch unreachable; if (self.test_filter) |filter| { if (mem.indexOf(u8, annotated_case_name, filter) == null) continue; -- cgit v1.2.3 From 30c4add85a0f4af727ad7cf8f2134114329d0f07 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 11 Jul 2018 20:17:47 -0400 Subject: std.event.Future: workaround in tests for llvm coro memory See #1194 --- std/event.zig | 2 +- std/event/future.zig | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'std/event/future.zig') diff --git a/std/event.zig b/std/event.zig index f3913a432b..1e52086286 100644 --- a/std/event.zig +++ b/std/event.zig @@ -4,7 +4,7 @@ pub const Lock = @import("event/lock.zig").Lock; pub const tcp = @import("event/tcp.zig"); pub const Channel = @import("event/channel.zig").Channel; pub const Group = @import("event/group.zig").Group; -pub const Future = @import("event/future.zig").Group; +pub const Future = @import("event/future.zig").Future; test "import event tests" { _ = @import("event/locked.zig"); diff --git a/std/event/future.zig b/std/event/future.zig index 6c03641828..b6ec861f77 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -67,6 +67,9 @@ test "std.event.Future" { } async fn testFuture(loop: *Loop) void { + suspend |p| { + resume p; + } var future = Future(i32).init(loop); const a = async waitOnFuture(&future) catch @panic("memory"); @@ -79,10 +82,16 @@ async fn testFuture(loop: *Loop) void { } async fn waitOnFuture(future: *Future(i32)) i32 { + suspend |p| { + resume p; + } return (await (async future.get() catch @panic("memory"))).*; } async fn resolveFuture(future: *Future(i32)) void { + suspend |p| { + resume p; + } future.data = 6; future.resolve(); } -- cgit v1.2.3 From 687bd92f9c3d9f521c8fe5884627ef1b00320364 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 12 Jul 
2018 15:08:40 -0400 Subject: self-hosted: generate zig IR for simple function no tests for this yet. I think the quickest path to testing will be creating the .o files and linking with libc, executing, and then comparing output. --- src-self-hosted/decl.zig | 96 ++++++ src-self-hosted/ir.zig | 745 ++++++++++++++++++++++++++++++++++------ src-self-hosted/module.zig | 383 ++++++++++----------- src-self-hosted/parsed_file.zig | 6 + src-self-hosted/scope.zig | 230 ++++++++++++- src-self-hosted/type.zig | 268 +++++++++++++++ src-self-hosted/value.zig | 125 +++++++ src-self-hosted/visib.zig | 4 + std/event/future.zig | 2 +- std/zig/ast.zig | 6 - std/zig/parse.zig | 5 - 11 files changed, 1555 insertions(+), 315 deletions(-) create mode 100644 src-self-hosted/decl.zig create mode 100644 src-self-hosted/parsed_file.zig create mode 100644 src-self-hosted/type.zig create mode 100644 src-self-hosted/value.zig create mode 100644 src-self-hosted/visib.zig (limited to 'std/event/future.zig') diff --git a/src-self-hosted/decl.zig b/src-self-hosted/decl.zig new file mode 100644 index 0000000000..1a75a3249e --- /dev/null +++ b/src-self-hosted/decl.zig @@ -0,0 +1,96 @@ +const std = @import("std"); +const Allocator = mem.Allocator; +const mem = std.mem; +const ast = std.zig.ast; +const Visib = @import("visib.zig").Visib; +const ParsedFile = @import("parsed_file.zig").ParsedFile; +const event = std.event; +const Value = @import("value.zig").Value; +const Token = std.zig.Token; +const errmsg = @import("errmsg.zig"); +const Scope = @import("scope.zig").Scope; +const Module = @import("module.zig").Module; + +pub const Decl = struct { + id: Id, + name: []const u8, + visib: Visib, + resolution: event.Future(Module.BuildError!void), + resolution_in_progress: u8, + parsed_file: *ParsedFile, + parent_scope: *Scope, + + pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8); + + pub fn isExported(base: *const Decl, tree: *ast.Tree) bool { + switch (base.id) { + 
Id.Fn => { + const fn_decl = @fieldParentPtr(Fn, "base", base); + return fn_decl.isExported(tree); + }, + else => return false, + } + } + + pub fn getSpan(base: *const Decl) errmsg.Span { + switch (base.id) { + Id.Fn => { + const fn_decl = @fieldParentPtr(Fn, "base", base); + const fn_proto = fn_decl.fn_proto; + const start = fn_proto.fn_token; + const end = fn_proto.name_token orelse start; + return errmsg.Span{ + .first = start, + .last = end + 1, + }; + }, + else => @panic("TODO"), + } + } + + pub const Id = enum { + Var, + Fn, + CompTime, + }; + + pub const Var = struct { + base: Decl, + }; + + pub const Fn = struct { + base: Decl, + value: Val, + fn_proto: *const ast.Node.FnProto, + + // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous + pub const Val = union { + Unresolved: void, + Ok: *Value.Fn, + }; + + pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 { + return if (self.fn_proto.extern_export_inline_token) |tok_index| x: { + const token = tree.tokens.at(tok_index); + break :x switch (token.id) { + Token.Id.Extern => tree.tokenSlicePtr(token), + else => null, + }; + } else null; + } + + pub fn isExported(self: Fn, tree: *ast.Tree) bool { + if (self.fn_proto.extern_export_inline_token) |tok_index| { + const token = tree.tokens.at(tok_index); + return token.id == Token.Id.Keyword_export; + } else { + return false; + } + } + }; + + pub const CompTime = struct { + base: Decl, + }; +}; + diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index 3334d9511b..f517dfe579 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -1,111 +1,656 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const Module = @import("module.zig").Module; const Scope = @import("scope.zig").Scope; +const ast = std.zig.ast; +const Allocator = std.mem.Allocator; +const Value = @import("value.zig").Value; +const Type = Value.Type; +const assert = std.debug.assert; +const Token = std.zig.Token; +const ParsedFile = 
@import("parsed_file.zig").ParsedFile; + +pub const LVal = enum { + None, + Ptr, +}; + +pub const Mut = enum { + Mut, + Const, +}; + +pub const Volatility = enum { + NonVolatile, + Volatile, +}; + +pub const IrVal = union(enum) { + Unknown, + Known: *Value, + + pub fn dump(self: IrVal) void { + switch (self) { + IrVal.Unknown => std.debug.warn("Unknown"), + IrVal.Known => |value| { + std.debug.warn("Known("); + value.dump(); + std.debug.warn(")"); + }, + } + } +}; pub const Instruction = struct { id: Id, scope: *Scope, + debug_id: usize, + val: IrVal, + + /// true if this instruction was generated by zig and not from user code + is_generated: bool, + + pub fn cast(base: *Instruction, comptime T: type) ?*T { + if (base.id == comptime typeToId(T)) { + return @fieldParentPtr(T, "base", base); + } + return null; + } + + pub fn typeToId(comptime T: type) Id { + comptime var i = 0; + inline while (i < @memberCount(Id)) : (i += 1) { + if (T == @field(Instruction, @memberName(Id, i))) { + return @field(Id, @memberName(Id, i)); + } + } + unreachable; + } + + pub fn dump(base: *const Instruction) void { + comptime var i = 0; + inline while (i < @memberCount(Id)) : (i += 1) { + if (base.id == @field(Id, @memberName(Id, i))) { + const T = @field(Instruction, @memberName(Id, i)); + std.debug.warn("#{} = {}(", base.debug_id, @tagName(base.id)); + @fieldParentPtr(T, "base", base).dump(); + std.debug.warn(")"); + return; + } + } + unreachable; + } + + pub fn setGenerated(base: *Instruction) void { + base.is_generated = true; + } + + pub fn isNoReturn(base: *const Instruction) bool { + switch (base.val) { + IrVal.Unknown => return false, + IrVal.Known => |x| return x.typeof.id == Type.Id.NoReturn, + } + } pub const Id = enum { - Br, - CondBr, - SwitchBr, - SwitchVar, - SwitchTarget, - Phi, - UnOp, - BinOp, - DeclVar, - LoadPtr, - StorePtr, - FieldPtr, - StructFieldPtr, - UnionFieldPtr, - ElemPtr, - VarPtr, - Call, - Const, Return, - Cast, - ContainerInitList, - ContainerInitFields, 
- StructInit, - UnionInit, - Unreachable, - TypeOf, - ToPtrType, - PtrTypeChild, - SetRuntimeSafety, - SetFloatMode, - ArrayType, - SliceType, - Asm, - SizeOf, - TestNonNull, - UnwrapMaybe, - MaybeWrap, - UnionTag, - Clz, - Ctz, - Import, - CImport, - CInclude, - CDefine, - CUndef, - ArrayLen, + Const, Ref, - MinValue, - MaxValue, - CompileErr, - CompileLog, - ErrName, - EmbedFile, - Cmpxchg, - Fence, - Truncate, - IntType, - BoolNot, - Memset, - Memcpy, - Slice, - MemberCount, - MemberType, - MemberName, - Breakpoint, - ReturnAddress, - FrameAddress, - AlignOf, - OverflowOp, - TestErr, - UnwrapErrCode, - UnwrapErrPayload, - ErrWrapCode, - ErrWrapPayload, - FnProto, - TestComptime, - PtrCast, - BitCast, - WidenOrShorten, - IntToPtr, - PtrToInt, - IntToEnum, - IntToErr, - ErrToInt, - CheckSwitchProngs, - CheckStatementIsVoid, - TypeName, - CanImplicitCast, - DeclRef, - Panic, - TagName, - TagType, - FieldParentPtr, - OffsetOf, - TypeId, - SetEvalBranchQuota, - PtrTypeOf, - AlignCast, - OpaqueType, - SetAlignStack, - ArgType, - Export, + DeclVar, + CheckVoidStmt, + Phi, + Br, + }; + + pub const Const = struct { + base: Instruction, + + pub fn buildBool(irb: *Builder, scope: *Scope, val: bool) !*Instruction { + const inst = try irb.arena().create(Const{ + .base = Instruction{ + .id = Instruction.Id.Const, + .is_generated = false, + .scope = scope, + .debug_id = irb.next_debug_id, + .val = IrVal{ .Known = &Value.Bool.get(irb.module, val).base }, + }, + }); + irb.next_debug_id += 1; + try irb.current_basic_block.instruction_list.append(&inst.base); + return &inst.base; + } + + pub fn buildVoid(irb: *Builder, scope: *Scope, is_generated: bool) !*Instruction { + const inst = try irb.arena().create(Const{ + .base = Instruction{ + .id = Instruction.Id.Const, + .is_generated = is_generated, + .scope = scope, + .debug_id = irb.next_debug_id, + .val = IrVal{ .Known = &Value.Void.get(irb.module).base }, + }, + }); + irb.next_debug_id += 1; + try 
irb.current_basic_block.instruction_list.append(&inst.base); + return &inst.base; + } + + pub fn dump(inst: *const Const) void { + inst.base.val.Known.dump(); + } + }; + + pub const Return = struct { + base: Instruction, + return_value: *Instruction, + + pub fn build(irb: *Builder, scope: *Scope, return_value: *Instruction) !*Instruction { + const inst = try irb.arena().create(Return{ + .base = Instruction{ + .id = Instruction.Id.Return, + .is_generated = false, + .scope = scope, + .debug_id = irb.next_debug_id, + .val = IrVal{ .Known = &Value.Void.get(irb.module).base }, + }, + .return_value = return_value, + }); + irb.next_debug_id += 1; + try irb.current_basic_block.instruction_list.append(&inst.base); + return &inst.base; + } + + pub fn dump(inst: *const Return) void { + std.debug.warn("#{}", inst.return_value.debug_id); + } + }; + + pub const Ref = struct { + base: Instruction, + target: *Instruction, + mut: Mut, + volatility: Volatility, + + pub fn build( + irb: *Builder, + scope: *Scope, + target: *Instruction, + mut: Mut, + volatility: Volatility, + ) !*Instruction { + const inst = try irb.arena().create(Ref{ + .base = Instruction{ + .id = Instruction.Id.Ref, + .is_generated = false, + .scope = scope, + .debug_id = irb.next_debug_id, + .val = IrVal.Unknown, + }, + .target = target, + .mut = mut, + .volatility = volatility, + }); + irb.next_debug_id += 1; + try irb.current_basic_block.instruction_list.append(&inst.base); + return &inst.base; + } + + pub fn dump(inst: *const Ref) void {} + }; + + pub const DeclVar = struct { + base: Instruction, + variable: *Variable, + + pub fn dump(inst: *const DeclVar) void {} + }; + + pub const CheckVoidStmt = struct { + base: Instruction, + target: *Instruction, + + pub fn build( + irb: *Builder, + scope: *Scope, + target: *Instruction, + ) !*Instruction { + const inst = try irb.arena().create(CheckVoidStmt{ + .base = Instruction{ + .id = Instruction.Id.CheckVoidStmt, + .is_generated = true, + .scope = scope, + .debug_id 
= irb.next_debug_id, + .val = IrVal{ .Known = &Value.Void.get(irb.module).base }, + }, + .target = target, + }); + irb.next_debug_id += 1; + try irb.current_basic_block.instruction_list.append(&inst.base); + return &inst.base; + } + + pub fn dump(inst: *const CheckVoidStmt) void {} + }; + + pub const Phi = struct { + base: Instruction, + incoming_blocks: []*BasicBlock, + incoming_values: []*Instruction, + + pub fn build( + irb: *Builder, + scope: *Scope, + incoming_blocks: []*BasicBlock, + incoming_values: []*Instruction, + ) !*Instruction { + const inst = try irb.arena().create(Phi{ + .base = Instruction{ + .id = Instruction.Id.Phi, + .is_generated = false, + .scope = scope, + .debug_id = irb.next_debug_id, + .val = IrVal.Unknown, + }, + .incoming_blocks = incoming_blocks, + .incoming_values = incoming_values, + }); + irb.next_debug_id += 1; + try irb.current_basic_block.instruction_list.append(&inst.base); + return &inst.base; + } + + pub fn dump(inst: *const Phi) void {} + }; + + pub const Br = struct { + base: Instruction, + dest_block: *BasicBlock, + is_comptime: *Instruction, + + pub fn build( + irb: *Builder, + scope: *Scope, + dest_block: *BasicBlock, + is_comptime: *Instruction, + ) !*Instruction { + const inst = try irb.arena().create(Br{ + .base = Instruction{ + .id = Instruction.Id.Br, + .is_generated = false, + .scope = scope, + .debug_id = irb.next_debug_id, + .val = IrVal{ .Known = &Value.NoReturn.get(irb.module).base }, + }, + .dest_block = dest_block, + .is_comptime = is_comptime, + }); + irb.next_debug_id += 1; + try irb.current_basic_block.instruction_list.append(&inst.base); + return &inst.base; + } + + pub fn dump(inst: *const Br) void {} }; }; + +pub const Variable = struct { + child_scope: *Scope, +}; + +pub const BasicBlock = struct { + ref_count: usize, + name_hint: []const u8, + debug_id: usize, + scope: *Scope, + instruction_list: std.ArrayList(*Instruction), + + pub fn ref(self: *BasicBlock) void { + self.ref_count += 1; + } +}; + +/// 
Stuff that survives longer than Builder +pub const Code = struct { + basic_block_list: std.ArrayList(*BasicBlock), + arena: std.heap.ArenaAllocator, + + /// allocator is module.a() + pub fn destroy(self: *Code, allocator: *Allocator) void { + self.arena.deinit(); + allocator.destroy(self); + } + + pub fn dump(self: *Code) void { + var bb_i: usize = 0; + for (self.basic_block_list.toSliceConst()) |bb| { + std.debug.warn("{}_{}:\n", bb.name_hint, bb.debug_id); + for (bb.instruction_list.toSliceConst()) |instr| { + std.debug.warn(" "); + instr.dump(); + std.debug.warn("\n"); + } + } + } +}; + +pub const Builder = struct { + module: *Module, + code: *Code, + current_basic_block: *BasicBlock, + next_debug_id: usize, + parsed_file: *ParsedFile, + is_comptime: bool, + + pub const Error = error{ + OutOfMemory, + Unimplemented, + }; + + pub fn init(module: *Module, parsed_file: *ParsedFile) !Builder { + const code = try module.a().create(Code{ + .basic_block_list = undefined, + .arena = std.heap.ArenaAllocator.init(module.a()), + }); + code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator); + errdefer code.destroy(module.a()); + + return Builder{ + .module = module, + .parsed_file = parsed_file, + .current_basic_block = undefined, + .code = code, + .next_debug_id = 0, + .is_comptime = false, + }; + } + + pub fn abort(self: *Builder) void { + self.code.destroy(self.module.a()); + } + + /// Call code.destroy() when done + pub fn finish(self: *Builder) *Code { + return self.code; + } + + /// No need to clean up resources thanks to the arena allocator. 
+ pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: []const u8) !*BasicBlock { + const basic_block = try self.arena().create(BasicBlock{ + .ref_count = 0, + .name_hint = name_hint, + .debug_id = self.next_debug_id, + .scope = scope, + .instruction_list = std.ArrayList(*Instruction).init(self.arena()), + }); + self.next_debug_id += 1; + return basic_block; + } + + pub fn setCursorAtEndAndAppendBlock(self: *Builder, basic_block: *BasicBlock) !void { + try self.code.basic_block_list.append(basic_block); + self.setCursorAtEnd(basic_block); + } + + pub fn setCursorAtEnd(self: *Builder, basic_block: *BasicBlock) void { + self.current_basic_block = basic_block; + } + + pub fn genNode(irb: *Builder, node: *ast.Node, scope: *Scope, lval: LVal) Error!*Instruction { + switch (node.id) { + ast.Node.Id.Root => unreachable, + ast.Node.Id.Use => unreachable, + ast.Node.Id.TestDecl => unreachable, + ast.Node.Id.VarDecl => @panic("TODO"), + ast.Node.Id.Defer => @panic("TODO"), + ast.Node.Id.InfixOp => @panic("TODO"), + ast.Node.Id.PrefixOp => @panic("TODO"), + ast.Node.Id.SuffixOp => @panic("TODO"), + ast.Node.Id.Switch => @panic("TODO"), + ast.Node.Id.While => @panic("TODO"), + ast.Node.Id.For => @panic("TODO"), + ast.Node.Id.If => @panic("TODO"), + ast.Node.Id.ControlFlowExpression => return error.Unimplemented, + ast.Node.Id.Suspend => @panic("TODO"), + ast.Node.Id.VarType => @panic("TODO"), + ast.Node.Id.ErrorType => @panic("TODO"), + ast.Node.Id.FnProto => @panic("TODO"), + ast.Node.Id.PromiseType => @panic("TODO"), + ast.Node.Id.IntegerLiteral => @panic("TODO"), + ast.Node.Id.FloatLiteral => @panic("TODO"), + ast.Node.Id.StringLiteral => @panic("TODO"), + ast.Node.Id.MultilineStringLiteral => @panic("TODO"), + ast.Node.Id.CharLiteral => @panic("TODO"), + ast.Node.Id.BoolLiteral => @panic("TODO"), + ast.Node.Id.NullLiteral => @panic("TODO"), + ast.Node.Id.UndefinedLiteral => @panic("TODO"), + ast.Node.Id.ThisLiteral => @panic("TODO"), + ast.Node.Id.Unreachable 
=> @panic("TODO"), + ast.Node.Id.Identifier => @panic("TODO"), + ast.Node.Id.GroupedExpression => { + const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", node); + return irb.genNode(grouped_expr.expr, scope, lval); + }, + ast.Node.Id.BuiltinCall => @panic("TODO"), + ast.Node.Id.ErrorSetDecl => @panic("TODO"), + ast.Node.Id.ContainerDecl => @panic("TODO"), + ast.Node.Id.Asm => @panic("TODO"), + ast.Node.Id.Comptime => @panic("TODO"), + ast.Node.Id.Block => { + const block = @fieldParentPtr(ast.Node.Block, "base", node); + return irb.lvalWrap(scope, try irb.genBlock(block, scope), lval); + }, + ast.Node.Id.DocComment => @panic("TODO"), + ast.Node.Id.SwitchCase => @panic("TODO"), + ast.Node.Id.SwitchElse => @panic("TODO"), + ast.Node.Id.Else => @panic("TODO"), + ast.Node.Id.Payload => @panic("TODO"), + ast.Node.Id.PointerPayload => @panic("TODO"), + ast.Node.Id.PointerIndexPayload => @panic("TODO"), + ast.Node.Id.StructField => @panic("TODO"), + ast.Node.Id.UnionTag => @panic("TODO"), + ast.Node.Id.EnumTag => @panic("TODO"), + ast.Node.Id.ErrorTag => @panic("TODO"), + ast.Node.Id.AsmInput => @panic("TODO"), + ast.Node.Id.AsmOutput => @panic("TODO"), + ast.Node.Id.AsyncAttribute => @panic("TODO"), + ast.Node.Id.ParamDecl => @panic("TODO"), + ast.Node.Id.FieldInitializer => @panic("TODO"), + } + } + + fn isCompTime(irb: *Builder, target_scope: *Scope) bool { + if (irb.is_comptime) + return true; + + var scope = target_scope; + while (true) { + switch (scope.id) { + Scope.Id.CompTime => return true, + Scope.Id.FnDef => return false, + Scope.Id.Decls => unreachable, + Scope.Id.Block, + Scope.Id.Defer, + Scope.Id.DeferExpr, + => scope = scope.parent orelse return false, + } + } + } + + pub fn genBlock(irb: *Builder, block: *ast.Node.Block, parent_scope: *Scope) !*Instruction { + const block_scope = try Scope.Block.create(irb.module, parent_scope); + + const outer_block_scope = &block_scope.base; + var child_scope = outer_block_scope; + + if 
(parent_scope.findFnDef()) |fndef_scope| { + if (fndef_scope.fn_val.child_scope == parent_scope) { + fndef_scope.fn_val.block_scope = block_scope; + } + } + + if (block.statements.len == 0) { + // {} + return Instruction.Const.buildVoid(irb, child_scope, false); + } + + if (block.label) |label| { + block_scope.incoming_values = std.ArrayList(*Instruction).init(irb.arena()); + block_scope.incoming_blocks = std.ArrayList(*BasicBlock).init(irb.arena()); + block_scope.end_block = try irb.createBasicBlock(parent_scope, "BlockEnd"); + block_scope.is_comptime = try Instruction.Const.buildBool(irb, parent_scope, irb.isCompTime(parent_scope)); + } + + var is_continuation_unreachable = false; + var noreturn_return_value: ?*Instruction = null; + + var stmt_it = block.statements.iterator(0); + while (stmt_it.next()) |statement_node_ptr| { + const statement_node = statement_node_ptr.*; + + if (statement_node.cast(ast.Node.Defer)) |defer_node| { + // defer starts a new scope + const defer_token = irb.parsed_file.tree.tokens.at(defer_node.defer_token); + const kind = switch (defer_token.id) { + Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit, + Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit, + else => unreachable, + }; + const defer_expr_scope = try Scope.DeferExpr.create(irb.module, parent_scope, defer_node.expr); + const defer_child_scope = try Scope.Defer.create(irb.module, parent_scope, kind, defer_expr_scope); + child_scope = &defer_child_scope.base; + continue; + } + const statement_value = try irb.genNode(statement_node, child_scope, LVal.None); + + is_continuation_unreachable = statement_value.isNoReturn(); + if (is_continuation_unreachable) { + // keep the last noreturn statement value around in case we need to return it + noreturn_return_value = statement_value; + } + + if (statement_value.cast(Instruction.DeclVar)) |decl_var| { + // variable declarations start a new scope + child_scope = decl_var.variable.child_scope; + } else if 
(!is_continuation_unreachable) { + // this statement's value must be void + _ = Instruction.CheckVoidStmt.build(irb, child_scope, statement_value); + } + } + + if (is_continuation_unreachable) { + assert(noreturn_return_value != null); + if (block.label == null or block_scope.incoming_blocks.len == 0) { + return noreturn_return_value.?; + } + + try irb.setCursorAtEndAndAppendBlock(block_scope.end_block); + return Instruction.Phi.build( + irb, + parent_scope, + block_scope.incoming_blocks.toOwnedSlice(), + block_scope.incoming_values.toOwnedSlice(), + ); + } + + if (block.label) |label| { + try block_scope.incoming_blocks.append(irb.current_basic_block); + try block_scope.incoming_values.append( + try Instruction.Const.buildVoid(irb, parent_scope, true), + ); + _ = try irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit); + (try Instruction.Br.build( + irb, + parent_scope, + block_scope.end_block, + block_scope.is_comptime, + )).setGenerated(); + try irb.setCursorAtEndAndAppendBlock(block_scope.end_block); + return Instruction.Phi.build( + irb, + parent_scope, + block_scope.incoming_blocks.toOwnedSlice(), + block_scope.incoming_values.toOwnedSlice(), + ); + } + + _ = try irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit); + const result = try Instruction.Const.buildVoid(irb, child_scope, false); + result.setGenerated(); + return result; + } + + fn genDefersForBlock( + irb: *Builder, + inner_scope: *Scope, + outer_scope: *Scope, + gen_kind: Scope.Defer.Kind, + ) !bool { + var scope = inner_scope; + var is_noreturn = false; + while (true) { + switch (scope.id) { + Scope.Id.Defer => { + const defer_scope = @fieldParentPtr(Scope.Defer, "base", scope); + const generate = switch (defer_scope.kind) { + Scope.Defer.Kind.ScopeExit => true, + Scope.Defer.Kind.ErrorExit => gen_kind == Scope.Defer.Kind.ErrorExit, + }; + if (generate) { + const defer_expr_scope = defer_scope.defer_expr_scope; + const instruction = try 
irb.genNode( + defer_expr_scope.expr_node, + &defer_expr_scope.base, + LVal.None, + ); + if (instruction.isNoReturn()) { + is_noreturn = true; + } else { + _ = Instruction.CheckVoidStmt.build(irb, &defer_expr_scope.base, instruction); + } + } + }, + Scope.Id.FnDef, + Scope.Id.Decls, + => return is_noreturn, + + Scope.Id.CompTime, + Scope.Id.Block, + => scope = scope.parent orelse return is_noreturn, + + Scope.Id.DeferExpr => unreachable, + } + } + } + + pub fn lvalWrap(irb: *Builder, scope: *Scope, instruction: *Instruction, lval: LVal) !*Instruction { + switch (lval) { + LVal.None => return instruction, + LVal.Ptr => { + // We needed a pointer to a value, but we got a value. So we create + // an instruction which just makes a const pointer of it. + return Instruction.Ref.build(irb, scope, instruction, Mut.Const, Volatility.NonVolatile); + }, + } + } + + fn arena(self: *Builder) *Allocator { + return &self.code.arena.allocator; + } +}; + +pub async fn gen(module: *Module, body_node: *ast.Node, scope: *Scope, parsed_file: *ParsedFile) !*Code { + var irb = try Builder.init(module, parsed_file); + errdefer irb.abort(); + + const entry_block = try irb.createBasicBlock(scope, "Entry"); + entry_block.ref(); // Entry block gets a reference because we enter it to begin. 
+ try irb.setCursorAtEndAndAppendBlock(entry_block); + + const result = try irb.genNode(body_node, scope, LVal.None); + if (!result.isNoReturn()) { + const void_inst = try Instruction.Const.buildVoid(&irb, scope, false); + (try Instruction.Return.build(&irb, scope, void_inst)).setGenerated(); + } + + return irb.finish(); +} diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig index 5cde12f65c..e74c84e02c 100644 --- a/src-self-hosted/module.zig +++ b/src-self-hosted/module.zig @@ -15,12 +15,21 @@ const errmsg = @import("errmsg.zig"); const ast = std.zig.ast; const event = std.event; const assert = std.debug.assert; +const AtomicRmwOp = builtin.AtomicRmwOp; +const AtomicOrder = builtin.AtomicOrder; +const Scope = @import("scope.zig").Scope; +const Decl = @import("decl.zig").Decl; +const ir = @import("ir.zig"); +const Visib = @import("visib.zig").Visib; +const ParsedFile = @import("parsed_file.zig").ParsedFile; +const Value = @import("value.zig").Value; +const Type = Value.Type; pub const Module = struct { loop: *event.Loop, name: Buffer, root_src_path: ?[]const u8, - module: llvm.ModuleRef, + llvm_module: llvm.ModuleRef, context: llvm.ContextRef, builder: llvm.BuilderRef, target: Target, @@ -91,6 +100,16 @@ pub const Module = struct { compile_errors: event.Locked(CompileErrList), + meta_type: *Type.MetaType, + void_type: *Type.Void, + bool_type: *Type.Bool, + noreturn_type: *Type.NoReturn, + + void_value: *Value.Void, + true_value: *Value.Bool, + false_value: *Value.Bool, + noreturn_value: *Value.NoReturn, + const CompileErrList = std.ArrayList(*errmsg.Msg); // TODO handle some of these earlier and report them in a way other than error codes @@ -129,6 +148,7 @@ pub const Module = struct { Overflow, NotSupported, BufferTooSmall, + Unimplemented, }; pub const Event = union(enum) { @@ -180,8 +200,8 @@ pub const Module = struct { const context = c.LLVMContextCreate() orelse return error.OutOfMemory; errdefer c.LLVMContextDispose(context); - const module = 
c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory; - errdefer c.LLVMDisposeModule(module); + const llvm_module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory; + errdefer c.LLVMDisposeModule(llvm_module); const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory; errdefer c.LLVMDisposeBuilder(builder); @@ -189,12 +209,12 @@ pub const Module = struct { const events = try event.Channel(Event).create(loop, 0); errdefer events.destroy(); - return loop.allocator.create(Module{ + const module = try loop.allocator.create(Module{ .loop = loop, .events = events, .name = name_buffer, .root_src_path = root_src_path, - .module = module, + .llvm_module = llvm_module, .context = context, .builder = builder, .target = target.*, @@ -248,7 +268,109 @@ pub const Module = struct { .exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)), .build_group = event.Group(BuildError!void).init(loop), .compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)), + + .meta_type = undefined, + .void_type = undefined, + .void_value = undefined, + .bool_type = undefined, + .true_value = undefined, + .false_value = undefined, + .noreturn_type = undefined, + .noreturn_value = undefined, + }); + try module.initTypes(); + return module; + } + + fn initTypes(module: *Module) !void { + module.meta_type = try module.a().create(Type.MetaType{ + .base = Type{ + .base = Value{ + .id = Value.Id.Type, + .typeof = undefined, + .ref_count = 3, // 3 because it references itself twice + }, + .id = builtin.TypeId.Type, + }, + .value = undefined, + }); + module.meta_type.value = &module.meta_type.base; + module.meta_type.base.base.typeof = &module.meta_type.base; + errdefer module.a().destroy(module.meta_type); + + module.void_type = try module.a().create(Type.Void{ + .base = Type{ + .base = Value{ + .id = Value.Id.Type, + 
.typeof = &Type.MetaType.get(module).base, + .ref_count = 1, + }, + .id = builtin.TypeId.Void, + }, + }); + errdefer module.a().destroy(module.void_type); + + module.noreturn_type = try module.a().create(Type.NoReturn{ + .base = Type{ + .base = Value{ + .id = Value.Id.Type, + .typeof = &Type.MetaType.get(module).base, + .ref_count = 1, + }, + .id = builtin.TypeId.NoReturn, + }, + }); + errdefer module.a().destroy(module.noreturn_type); + + module.bool_type = try module.a().create(Type.Bool{ + .base = Type{ + .base = Value{ + .id = Value.Id.Type, + .typeof = &Type.MetaType.get(module).base, + .ref_count = 1, + }, + .id = builtin.TypeId.Bool, + }, + }); + errdefer module.a().destroy(module.bool_type); + + module.void_value = try module.a().create(Value.Void{ + .base = Value{ + .id = Value.Id.Void, + .typeof = &Type.Void.get(module).base, + .ref_count = 1, + }, + }); + errdefer module.a().destroy(module.void_value); + + module.true_value = try module.a().create(Value.Bool{ + .base = Value{ + .id = Value.Id.Bool, + .typeof = &Type.Bool.get(module).base, + .ref_count = 1, + }, + .x = true, + }); + errdefer module.a().destroy(module.true_value); + + module.false_value = try module.a().create(Value.Bool{ + .base = Value{ + .id = Value.Id.Bool, + .typeof = &Type.Bool.get(module).base, + .ref_count = 1, + }, + .x = false, }); + errdefer module.a().destroy(module.false_value); + + module.noreturn_value = try module.a().create(Value.NoReturn{ + .base = Value{ + .id = Value.Id.NoReturn, + .typeof = &Type.NoReturn.get(module).base, + .ref_count = 1, + }, + }); + errdefer module.a().destroy(module.noreturn_value); } fn dump(self: *Module) void { @@ -256,9 +378,17 @@ pub const Module = struct { } pub fn destroy(self: *Module) void { + self.noreturn_value.base.deref(self); + self.void_value.base.deref(self); + self.false_value.base.deref(self); + self.true_value.base.deref(self); + self.noreturn_type.base.base.deref(self); + self.void_type.base.base.deref(self); + 
self.meta_type.base.base.deref(self); + self.events.destroy(); c.LLVMDisposeBuilder(self.builder); - c.LLVMDisposeModule(self.module); + c.LLVMDisposeModule(self.llvm_module); c.LLVMContextDispose(self.context); self.name.deinit(); @@ -331,8 +461,8 @@ pub const Module = struct { const tree = &parsed_file.tree; // create empty struct for it - const decls = try Scope.Decls.create(self.a(), null); - errdefer decls.destroy(); + const decls = try Scope.Decls.create(self, null); + defer decls.base.deref(self); var decl_group = event.Group(BuildError!void).init(self.loop); errdefer decl_group.cancelAll(); @@ -359,14 +489,17 @@ pub const Module = struct { .id = Decl.Id.Fn, .name = name, .visib = parseVisibToken(tree, fn_proto.visib_token), - .resolution = Decl.Resolution.Unresolved, + .resolution = event.Future(BuildError!void).init(self.loop), + .resolution_in_progress = 0, + .parsed_file = parsed_file, + .parent_scope = &decls.base, }, .value = Decl.Fn.Val{ .Unresolved = {} }, .fn_proto = fn_proto, }); errdefer self.a().destroy(fn_decl); - try decl_group.call(addTopLevelDecl, self, parsed_file, &fn_decl.base); + try decl_group.call(addTopLevelDecl, self, &fn_decl.base); }, ast.Node.Id.TestDecl => @panic("TODO"), else => unreachable, @@ -376,12 +509,12 @@ pub const Module = struct { try await (async self.build_group.wait() catch unreachable); } - async fn addTopLevelDecl(self: *Module, parsed_file: *ParsedFile, decl: *Decl) !void { - const is_export = decl.isExported(&parsed_file.tree); + async fn addTopLevelDecl(self: *Module, decl: *Decl) !void { + const is_export = decl.isExported(&decl.parsed_file.tree); if (is_export) { - try self.build_group.call(verifyUniqueSymbol, self, parsed_file, decl); - try self.build_group.call(generateDecl, self, parsed_file, decl); + try self.build_group.call(verifyUniqueSymbol, self, decl); + try self.build_group.call(resolveDecl, self, decl); } } @@ -416,36 +549,21 @@ pub const Module = struct { try compile_errors.value.append(msg); } - 
async fn verifyUniqueSymbol(self: *Module, parsed_file: *ParsedFile, decl: *Decl) !void { + async fn verifyUniqueSymbol(self: *Module, decl: *Decl) !void { const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable); defer exported_symbol_names.release(); if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| { try self.addCompileError( - parsed_file, + decl.parsed_file, decl.getSpan(), "exported symbol collision: '{}'", decl.name, ); + // TODO add error note showing location of other symbol } } - /// This declaration has been blessed as going into the final code generation. - async fn generateDecl(self: *Module, parsed_file: *ParsedFile, decl: *Decl) void { - switch (decl.id) { - Decl.Id.Var => @panic("TODO"), - Decl.Id.Fn => { - const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl); - return await (async self.generateDeclFn(parsed_file, fn_decl) catch unreachable); - }, - Decl.Id.CompTime => @panic("TODO"), - } - } - - async fn generateDeclFn(self: *Module, parsed_file: *ParsedFile, fn_decl: *Decl.Fn) void { - fn_decl.value = Decl.Fn.Val{ .Ok = Value.Fn{} }; - } - pub fn link(self: *Module, out_file: ?[]const u8) !void { warn("TODO link"); return error.Todo; @@ -501,177 +619,48 @@ fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib } } -pub const Scope = struct { - id: Id, - parent: ?*Scope, - - pub const Id = enum { - Decls, - Block, - }; - - pub const Decls = struct { - base: Scope, - table: Decl.Table, - - pub fn create(a: *Allocator, parent: ?*Scope) !*Decls { - const self = try a.create(Decls{ - .base = Scope{ - .id = Id.Decls, - .parent = parent, - }, - .table = undefined, - }); - errdefer a.destroy(self); - - self.table = Decl.Table.init(a); - errdefer self.table.deinit(); - - return self; - } - - pub fn destroy(self: *Decls) void { - self.table.deinit(); - self.table.allocator.destroy(self); - self.* = undefined; - } - }; - - pub const Block = struct { - base: Scope, - }; 
-}; - -pub const Visib = enum { - Private, - Pub, -}; - -pub const Decl = struct { - id: Id, - name: []const u8, - visib: Visib, - resolution: Resolution, - - pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8); - - pub fn isExported(base: *const Decl, tree: *ast.Tree) bool { - switch (base.id) { - Id.Fn => { - const fn_decl = @fieldParentPtr(Fn, "base", base); - return fn_decl.isExported(tree); - }, - else => return false, - } +/// This declaration has been blessed as going into the final code generation. +pub async fn resolveDecl(module: *Module, decl: *Decl) !void { + if (@atomicRmw(u8, &decl.resolution_in_progress, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) { + decl.resolution.data = await (async generateDecl(module, decl) catch unreachable); + decl.resolution.resolve(); + } else { + return (await (async decl.resolution.get() catch unreachable)).*; } +} - pub fn getSpan(base: *const Decl) errmsg.Span { - switch (base.id) { - Id.Fn => { - const fn_decl = @fieldParentPtr(Fn, "base", base); - const fn_proto = fn_decl.fn_proto; - const start = fn_proto.fn_token; - const end = fn_proto.name_token orelse start; - return errmsg.Span{ - .first = start, - .last = end + 1, - }; - }, - else => @panic("TODO"), - } +/// The function that actually does the generation. 
+async fn generateDecl(module: *Module, decl: *Decl) !void { + switch (decl.id) { + Decl.Id.Var => @panic("TODO"), + Decl.Id.Fn => { + const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl); + return await (async generateDeclFn(module, fn_decl) catch unreachable); + }, + Decl.Id.CompTime => @panic("TODO"), } +} - pub const Resolution = enum { - Unresolved, - InProgress, - Invalid, - Ok, - }; - - pub const Id = enum { - Var, - Fn, - CompTime, - }; - - pub const Var = struct { - base: Decl, - }; +async fn generateDeclFn(module: *Module, fn_decl: *Decl.Fn) !void { + const body_node = fn_decl.fn_proto.body_node orelse @panic("TODO extern fn proto decl"); - pub const Fn = struct { - base: Decl, - value: Val, - fn_proto: *const ast.Node.FnProto, + const fndef_scope = try Scope.FnDef.create(module, fn_decl.base.parent_scope); + defer fndef_scope.base.deref(module); - // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous - pub const Val = union { - Unresolved: void, - Ok: Value.Fn, - }; + const fn_type = try Type.Fn.create(module); + defer fn_type.base.base.deref(module); - pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 { - return if (self.fn_proto.extern_export_inline_token) |tok_index| x: { - const token = tree.tokens.at(tok_index); - break :x switch (token.id) { - Token.Id.Extern => tree.tokenSlicePtr(token), - else => null, - }; - } else null; - } + const fn_val = try Value.Fn.create(module, fn_type, fndef_scope); + defer fn_val.base.deref(module); - pub fn isExported(self: Fn, tree: *ast.Tree) bool { - if (self.fn_proto.extern_export_inline_token) |tok_index| { - const token = tree.tokens.at(tok_index); - return token.id == Token.Id.Keyword_export; - } else { - return false; - } - } - }; + fn_decl.value = Decl.Fn.Val{ .Ok = fn_val }; - pub const CompTime = struct { - base: Decl, - }; -}; - -pub const Value = struct { - pub const Fn = struct {}; -}; - -pub const Type = struct { - id: Id, - - pub const Id = enum { - Type, - Void, - 
Bool, - NoReturn, - Int, - Float, - Pointer, - Array, - Struct, - ComptimeFloat, - ComptimeInt, - Undefined, - Null, - Optional, - ErrorUnion, - ErrorSet, - Enum, - Union, - Fn, - Opaque, - Promise, - }; - - pub const Struct = struct { - base: Type, - decls: *Scope.Decls, - }; -}; - -pub const ParsedFile = struct { - tree: ast.Tree, - realpath: []const u8, -}; + const code = try await (async ir.gen( + module, + body_node, + &fndef_scope.base, + fn_decl.base.parsed_file, + ) catch unreachable); + //code.dump(); + //try await (async irAnalyze(module, func) catch unreachable); +} diff --git a/src-self-hosted/parsed_file.zig b/src-self-hosted/parsed_file.zig new file mode 100644 index 0000000000..d728c2fd18 --- /dev/null +++ b/src-self-hosted/parsed_file.zig @@ -0,0 +1,6 @@ +const ast = @import("std").zig.ast; + +pub const ParsedFile = struct { + tree: ast.Tree, + realpath: []const u8, +}; diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig index b73dcb4ed3..8f8d016a7c 100644 --- a/src-self-hosted/scope.zig +++ b/src-self-hosted/scope.zig @@ -1,16 +1,234 @@ +const std = @import("std"); +const Allocator = mem.Allocator; +const Decl = @import("decl.zig").Decl; +const Module = @import("module.zig").Module; +const mem = std.mem; +const ast = std.zig.ast; +const Value = @import("value.zig").Value; +const ir = @import("ir.zig"); + pub const Scope = struct { id: Id, - parent: *Scope, + parent: ?*Scope, + ref_count: usize, + + pub fn ref(base: *Scope) void { + base.ref_count += 1; + } + + pub fn deref(base: *Scope, module: *Module) void { + base.ref_count -= 1; + if (base.ref_count == 0) { + if (base.parent) |parent| parent.deref(module); + switch (base.id) { + Id.Decls => @fieldParentPtr(Decls, "base", base).destroy(), + Id.Block => @fieldParentPtr(Block, "base", base).destroy(module), + Id.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(module), + Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(module), + Id.Defer => @fieldParentPtr(Defer, 
"base", base).destroy(module), + Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(module), + } + } + } + + pub fn findFnDef(base: *Scope) ?*FnDef { + var scope = base; + while (true) { + switch (scope.id) { + Id.FnDef => return @fieldParentPtr(FnDef, "base", base), + Id.Decls => return null, + + Id.Block, + Id.Defer, + Id.DeferExpr, + Id.CompTime, + => scope = scope.parent orelse return null, + } + } + } pub const Id = enum { Decls, Block, - Defer, - DeferExpr, - VarDecl, - CImport, - Loop, FnDef, CompTime, + Defer, + DeferExpr, + }; + + pub const Decls = struct { + base: Scope, + table: Decl.Table, + + /// Creates a Decls scope with 1 reference + pub fn create(module: *Module, parent: ?*Scope) !*Decls { + const self = try module.a().create(Decls{ + .base = Scope{ + .id = Id.Decls, + .parent = parent, + .ref_count = 1, + }, + .table = undefined, + }); + errdefer module.a().destroy(self); + + self.table = Decl.Table.init(module.a()); + errdefer self.table.deinit(); + + if (parent) |p| p.ref(); + + return self; + } + + pub fn destroy(self: *Decls) void { + self.table.deinit(); + self.table.allocator.destroy(self); + } + }; + + pub const Block = struct { + base: Scope, + incoming_values: std.ArrayList(*ir.Instruction), + incoming_blocks: std.ArrayList(*ir.BasicBlock), + end_block: *ir.BasicBlock, + is_comptime: *ir.Instruction, + + /// Creates a Block scope with 1 reference + pub fn create(module: *Module, parent: ?*Scope) !*Block { + const self = try module.a().create(Block{ + .base = Scope{ + .id = Id.Block, + .parent = parent, + .ref_count = 1, + }, + .incoming_values = undefined, + .incoming_blocks = undefined, + .end_block = undefined, + .is_comptime = undefined, + }); + errdefer module.a().destroy(self); + + if (parent) |p| p.ref(); + return self; + } + + pub fn destroy(self: *Block, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const FnDef = struct { + base: Scope, + + /// This reference is not counted so that the scope 
can get destroyed with the function + fn_val: *Value.Fn, + + /// Creates a FnDef scope with 1 reference + /// Must set the fn_val later + pub fn create(module: *Module, parent: ?*Scope) !*FnDef { + const self = try module.a().create(FnDef{ + .base = Scope{ + .id = Id.FnDef, + .parent = parent, + .ref_count = 1, + }, + .fn_val = undefined, + }); + + if (parent) |p| p.ref(); + + return self; + } + + pub fn destroy(self: *FnDef, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const CompTime = struct { + base: Scope, + + /// Creates a CompTime scope with 1 reference + pub fn create(module: *Module, parent: ?*Scope) !*CompTime { + const self = try module.a().create(CompTime{ + .base = Scope{ + .id = Id.CompTime, + .parent = parent, + .ref_count = 1, + }, + }); + + if (parent) |p| p.ref(); + return self; + } + + pub fn destroy(self: *CompTime, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Defer = struct { + base: Scope, + defer_expr_scope: *DeferExpr, + kind: Kind, + + pub const Kind = enum { + ScopeExit, + ErrorExit, + }; + + /// Creates a Defer scope with 1 reference + pub fn create( + module: *Module, + parent: ?*Scope, + kind: Kind, + defer_expr_scope: *DeferExpr, + ) !*Defer { + const self = try module.a().create(Defer{ + .base = Scope{ + .id = Id.Defer, + .parent = parent, + .ref_count = 1, + }, + .defer_expr_scope = defer_expr_scope, + .kind = kind, + }); + errdefer module.a().destroy(self); + + defer_expr_scope.base.ref(); + + if (parent) |p| p.ref(); + return self; + } + + pub fn destroy(self: *Defer, module: *Module) void { + self.defer_expr_scope.base.deref(module); + module.a().destroy(self); + } + }; + + pub const DeferExpr = struct { + base: Scope, + expr_node: *ast.Node, + + /// Creates a DeferExpr scope with 1 reference + pub fn create(module: *Module, parent: ?*Scope, expr_node: *ast.Node) !*DeferExpr { + const self = try module.a().create(DeferExpr{ + .base = Scope{ + .id = Id.DeferExpr, + .parent = 
parent, + .ref_count = 1, + }, + .expr_node = expr_node, + }); + errdefer module.a().destroy(self); + + if (parent) |p| p.ref(); + return self; + } + + pub fn destroy(self: *DeferExpr, module: *Module) void { + module.a().destroy(self); + } }; }; diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig new file mode 100644 index 0000000000..4b3918854d --- /dev/null +++ b/src-self-hosted/type.zig @@ -0,0 +1,268 @@ +const builtin = @import("builtin"); +const Scope = @import("scope.zig").Scope; +const Module = @import("module.zig").Module; +const Value = @import("value.zig").Value; + +pub const Type = struct { + base: Value, + id: Id, + + pub const Id = builtin.TypeId; + + pub fn destroy(base: *Type, module: *Module) void { + switch (base.id) { + Id.Struct => @fieldParentPtr(Struct, "base", base).destroy(module), + Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(module), + Id.Type => @fieldParentPtr(MetaType, "base", base).destroy(module), + Id.Void => @fieldParentPtr(Void, "base", base).destroy(module), + Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(module), + Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(module), + Id.Int => @fieldParentPtr(Int, "base", base).destroy(module), + Id.Float => @fieldParentPtr(Float, "base", base).destroy(module), + Id.Pointer => @fieldParentPtr(Pointer, "base", base).destroy(module), + Id.Array => @fieldParentPtr(Array, "base", base).destroy(module), + Id.ComptimeFloat => @fieldParentPtr(ComptimeFloat, "base", base).destroy(module), + Id.ComptimeInt => @fieldParentPtr(ComptimeInt, "base", base).destroy(module), + Id.Undefined => @fieldParentPtr(Undefined, "base", base).destroy(module), + Id.Null => @fieldParentPtr(Null, "base", base).destroy(module), + Id.Optional => @fieldParentPtr(Optional, "base", base).destroy(module), + Id.ErrorUnion => @fieldParentPtr(ErrorUnion, "base", base).destroy(module), + Id.ErrorSet => @fieldParentPtr(ErrorSet, "base", base).destroy(module), + Id.Enum => 
@fieldParentPtr(Enum, "base", base).destroy(module), + Id.Union => @fieldParentPtr(Union, "base", base).destroy(module), + Id.Namespace => @fieldParentPtr(Namespace, "base", base).destroy(module), + Id.Block => @fieldParentPtr(Block, "base", base).destroy(module), + Id.BoundFn => @fieldParentPtr(BoundFn, "base", base).destroy(module), + Id.ArgTuple => @fieldParentPtr(ArgTuple, "base", base).destroy(module), + Id.Opaque => @fieldParentPtr(Opaque, "base", base).destroy(module), + Id.Promise => @fieldParentPtr(Promise, "base", base).destroy(module), + } + } + + pub const Struct = struct { + base: Type, + decls: *Scope.Decls, + + pub fn destroy(self: *Struct, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Fn = struct { + base: Type, + + pub fn create(module: *Module) !*Fn { + return module.a().create(Fn{ + .base = Type{ + .base = Value{ + .id = Value.Id.Type, + .typeof = &MetaType.get(module).base, + .ref_count = 1, + }, + .id = builtin.TypeId.Fn, + }, + }); + } + + pub fn destroy(self: *Fn, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const MetaType = struct { + base: Type, + value: *Type, + + /// Adds 1 reference to the resulting type + pub fn get(module: *Module) *MetaType { + module.meta_type.base.base.ref(); + return module.meta_type; + } + + pub fn destroy(self: *MetaType, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Void = struct { + base: Type, + + /// Adds 1 reference to the resulting type + pub fn get(module: *Module) *Void { + module.void_type.base.base.ref(); + return module.void_type; + } + + pub fn destroy(self: *Void, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Bool = struct { + base: Type, + + /// Adds 1 reference to the resulting type + pub fn get(module: *Module) *Bool { + module.bool_type.base.base.ref(); + return module.bool_type; + } + + pub fn destroy(self: *Bool, module: *Module) void { + module.a().destroy(self); + } + }; + + pub 
const NoReturn = struct { + base: Type, + + /// Adds 1 reference to the resulting type + pub fn get(module: *Module) *NoReturn { + module.noreturn_type.base.base.ref(); + return module.noreturn_type; + } + + pub fn destroy(self: *NoReturn, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Int = struct { + base: Type, + + pub fn destroy(self: *Int, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Float = struct { + base: Type, + + pub fn destroy(self: *Float, module: *Module) void { + module.a().destroy(self); + } + }; + pub const Pointer = struct { + base: Type, + + pub fn destroy(self: *Pointer, module: *Module) void { + module.a().destroy(self); + } + }; + pub const Array = struct { + base: Type, + + pub fn destroy(self: *Array, module: *Module) void { + module.a().destroy(self); + } + }; + pub const ComptimeFloat = struct { + base: Type, + + pub fn destroy(self: *ComptimeFloat, module: *Module) void { + module.a().destroy(self); + } + }; + pub const ComptimeInt = struct { + base: Type, + + pub fn destroy(self: *ComptimeInt, module: *Module) void { + module.a().destroy(self); + } + }; + pub const Undefined = struct { + base: Type, + + pub fn destroy(self: *Undefined, module: *Module) void { + module.a().destroy(self); + } + }; + pub const Null = struct { + base: Type, + + pub fn destroy(self: *Null, module: *Module) void { + module.a().destroy(self); + } + }; + pub const Optional = struct { + base: Type, + + pub fn destroy(self: *Optional, module: *Module) void { + module.a().destroy(self); + } + }; + pub const ErrorUnion = struct { + base: Type, + + pub fn destroy(self: *ErrorUnion, module: *Module) void { + module.a().destroy(self); + } + }; + pub const ErrorSet = struct { + base: Type, + + pub fn destroy(self: *ErrorSet, module: *Module) void { + module.a().destroy(self); + } + }; + pub const Enum = struct { + base: Type, + + pub fn destroy(self: *Enum, module: *Module) void { + module.a().destroy(self); + } + 
}; + pub const Union = struct { + base: Type, + + pub fn destroy(self: *Union, module: *Module) void { + module.a().destroy(self); + } + }; + pub const Namespace = struct { + base: Type, + + pub fn destroy(self: *Namespace, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Block = struct { + base: Type, + + pub fn destroy(self: *Block, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const BoundFn = struct { + base: Type, + + pub fn destroy(self: *BoundFn, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const ArgTuple = struct { + base: Type, + + pub fn destroy(self: *ArgTuple, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Opaque = struct { + base: Type, + + pub fn destroy(self: *Opaque, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Promise = struct { + base: Type, + + pub fn destroy(self: *Promise, module: *Module) void { + module.a().destroy(self); + } + }; +}; diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig new file mode 100644 index 0000000000..b53d03d0ad --- /dev/null +++ b/src-self-hosted/value.zig @@ -0,0 +1,125 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const Scope = @import("scope.zig").Scope; +const Module = @import("module.zig").Module; + +/// Values are ref-counted, heap-allocated, and copy-on-write +/// If there is only 1 ref then write need not copy +pub const Value = struct { + id: Id, + typeof: *Type, + ref_count: usize, + + pub fn ref(base: *Value) void { + base.ref_count += 1; + } + + pub fn deref(base: *Value, module: *Module) void { + base.ref_count -= 1; + if (base.ref_count == 0) { + base.typeof.base.deref(module); + switch (base.id) { + Id.Type => @fieldParentPtr(Type, "base", base).destroy(module), + Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(module), + Id.Void => @fieldParentPtr(Void, "base", base).destroy(module), + Id.Bool => @fieldParentPtr(Bool, "base", 
base).destroy(module), + Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(module), + } + } + } + + pub fn dump(base: *const Value) void { + std.debug.warn("{}", @tagName(base.id)); + } + + pub const Id = enum { + Type, + Fn, + Void, + Bool, + NoReturn, + }; + + pub const Type = @import("type.zig").Type; + + pub const Fn = struct { + base: Value, + + /// parent should be the top level decls or container decls + fndef_scope: *Scope.FnDef, + + /// parent is scope for last parameter + child_scope: *Scope, + + /// parent is child_scope + block_scope: *Scope.Block, + + /// Creates a Fn value with 1 ref + pub fn create(module: *Module, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef) !*Fn { + const self = try module.a().create(Fn{ + .base = Value{ + .id = Value.Id.Fn, + .typeof = &fn_type.base, + .ref_count = 1, + }, + .fndef_scope = fndef_scope, + .child_scope = &fndef_scope.base, + .block_scope = undefined, + }); + fn_type.base.base.ref(); + fndef_scope.fn_val = self; + fndef_scope.base.ref(); + return self; + } + + pub fn destroy(self: *Fn, module: *Module) void { + self.fndef_scope.base.deref(module); + module.a().destroy(self); + } + }; + + pub const Void = struct { + base: Value, + + pub fn get(module: *Module) *Void { + module.void_value.base.ref(); + return module.void_value; + } + + pub fn destroy(self: *Void, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const Bool = struct { + base: Value, + x: bool, + + pub fn get(module: *Module, x: bool) *Bool { + if (x) { + module.true_value.base.ref(); + return module.true_value; + } else { + module.false_value.base.ref(); + return module.false_value; + } + } + + pub fn destroy(self: *Bool, module: *Module) void { + module.a().destroy(self); + } + }; + + pub const NoReturn = struct { + base: Value, + + pub fn get(module: *Module) *NoReturn { + module.noreturn_value.base.ref(); + return module.noreturn_value; + } + + pub fn destroy(self: *NoReturn, module: *Module) void { + 
module.a().destroy(self); + } + }; +}; diff --git a/src-self-hosted/visib.zig b/src-self-hosted/visib.zig new file mode 100644 index 0000000000..3704600cca --- /dev/null +++ b/src-self-hosted/visib.zig @@ -0,0 +1,4 @@ +pub const Visib = enum { + Private, + Pub, +}; diff --git a/std/event/future.zig b/std/event/future.zig index b6ec861f77..23fa570c8f 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -57,7 +57,7 @@ test "std.event.Future" { const allocator = &da.allocator; var loop: Loop = undefined; - try loop.initSingleThreaded(allocator); + try loop.initMultiThreaded(allocator); defer loop.deinit(); const handle = try async testFuture(&loop); diff --git a/std/zig/ast.zig b/std/zig/ast.zig index 63518c5182..004f9278b9 100644 --- a/std/zig/ast.zig +++ b/std/zig/ast.zig @@ -970,14 +970,8 @@ pub const Node = struct { pub const Defer = struct { base: Node, defer_token: TokenIndex, - kind: Kind, expr: *Node, - const Kind = enum { - Error, - Unconditional, - }; - pub fn iterate(self: *Defer, index: usize) ?*Node { var i = index; diff --git a/std/zig/parse.zig b/std/zig/parse.zig index 9f0371d4da..9842ba2a17 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -1041,11 +1041,6 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { const node = try arena.create(ast.Node.Defer{ .base = ast.Node{ .id = ast.Node.Id.Defer }, .defer_token = token_index, - .kind = switch (token_ptr.id) { - Token.Id.Keyword_defer => ast.Node.Defer.Kind.Unconditional, - Token.Id.Keyword_errdefer => ast.Node.Defer.Kind.Error, - else => unreachable, - }, .expr = undefined, }); const node_ptr = try block.statements.addOne(); -- cgit v1.2.3 From 363f4facea7fac2d6cfeab9d1d276ecd8e8e4df0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 15 Jul 2018 00:04:12 -0400 Subject: self-hosted: generate LLVM IR for simple function --- src-self-hosted/codegen.zig | 158 +++++++++++++++++++++++++++++++++++++++- src-self-hosted/compilation.zig | 6 +- src-self-hosted/ir.zig 
| 56 ++++++++++++-- src-self-hosted/llvm.zig | 64 +++++++++++++++- src-self-hosted/scope.zig | 32 ++++++++ src-self-hosted/type.zig | 75 +++++++++++++++++++ src-self-hosted/value.zig | 22 ++++++ std/event/future.zig | 10 +++ 8 files changed, 412 insertions(+), 11 deletions(-) (limited to 'std/event/future.zig') diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index a07485e74e..698f1e5b45 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -8,6 +8,7 @@ const ir = @import("ir.zig"); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const event = std.event; +const assert = std.debug.assert; pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) !void { fn_val.base.ref(); @@ -35,9 +36,23 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) try renderToLlvmModule(&ofile, fn_val, code); + // TODO module level assembly + //if (buf_len(&g->global_asm) != 0) { + // LLVMSetModuleInlineAsm(g->module, buf_ptr(&g->global_asm)); + //} + + // TODO + //ZigLLVMDIBuilderFinalize(g->dbuilder); + if (comp.verbose_llvm_ir) { llvm.DumpModule(ofile.module); } + + // verify the llvm module when safety is on + if (std.debug.runtime_safety) { + var error_ptr: ?[*]u8 = null; + _ = llvm.VerifyModule(ofile.module, llvm.AbortProcessAction, &error_ptr); + } } pub const ObjectFile = struct { @@ -55,5 +70,146 @@ pub const ObjectFile = struct { pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) !void { // TODO audit more of codegen.cpp:fn_llvm_value and port more logic const llvm_fn_type = try fn_val.base.typeof.getLlvmType(ofile); - const llvm_fn = llvm.AddFunction(ofile.module, fn_val.symbol_name.ptr(), llvm_fn_type); + const llvm_fn = llvm.AddFunction( + ofile.module, + fn_val.symbol_name.ptr(), + llvm_fn_type, + ) orelse return error.OutOfMemory; + + const want_fn_safety = fn_val.block_scope.safety.get(ofile.comp); + if 
(want_fn_safety and ofile.comp.haveLibC()) { + try addLLVMFnAttr(ofile, llvm_fn, "sspstrong"); + try addLLVMFnAttrStr(ofile, llvm_fn, "stack-protector-buffer-size", "4"); + } + + // TODO + //if (fn_val.align_stack) |align_stack| { + // try addLLVMFnAttrInt(ofile, llvm_fn, "alignstack", align_stack); + //} + + const fn_type = fn_val.base.typeof.cast(Type.Fn).?; + + try addLLVMFnAttr(ofile, llvm_fn, "nounwind"); + //add_uwtable_attr(g, fn_table_entry->llvm_value); + try addLLVMFnAttr(ofile, llvm_fn, "nobuiltin"); + + //if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) { + // ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true"); + // ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr); + //} + + //if (fn_table_entry->section_name) { + // LLVMSetSection(fn_table_entry->llvm_value, buf_ptr(fn_table_entry->section_name)); + //} + //if (fn_table_entry->align_bytes > 0) { + // LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes); + //} else { + // // We'd like to set the best alignment for the function here, but on Darwin LLVM gives + // // "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling + // // any of the functions for getting alignment. Not specifying the alignment should + // // use the ABI alignment, which is fine. 
+ //} + + //if (!type_has_bits(return_type)) { + // // nothing to do + //} else if (type_is_codegen_pointer(return_type)) { + // addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull"); + //} else if (handle_is_ptr(return_type) && + // calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc)) + //{ + // addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret"); + // addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull"); + //} + + // TODO set parameter attributes + + // TODO + //uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); + //if (err_ret_trace_arg_index != UINT32_MAX) { + // addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull"); + //} + + const cur_ret_ptr = if (fn_type.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null; + + // build all basic blocks + for (code.basic_block_list.toSlice()) |bb| { + bb.llvm_block = llvm.AppendBasicBlockInContext( + ofile.context, + llvm_fn, + bb.name_hint, + ) orelse return error.OutOfMemory; + } + const entry_bb = code.basic_block_list.at(0); + llvm.PositionBuilderAtEnd(ofile.builder, entry_bb.llvm_block); + + llvm.ClearCurrentDebugLocation(ofile.builder); + + // TODO set up error return tracing + // TODO allocate temporary stack values + // TODO create debug variable declarations for variables and allocate all local variables + // TODO finishing error return trace setup. we have to do this after all the allocas. 
+ // TODO create debug variable declarations for parameters + + for (code.basic_block_list.toSlice()) |current_block| { + llvm.PositionBuilderAtEnd(ofile.builder, current_block.llvm_block); + for (current_block.instruction_list.toSlice()) |instruction| { + if (instruction.ref_count == 0 and !instruction.hasSideEffects()) continue; + + instruction.llvm_value = try instruction.render(ofile, fn_val); + } + current_block.llvm_exit_block = llvm.GetInsertBlock(ofile.builder); + } +} + +fn addLLVMAttr( + ofile: *ObjectFile, + val: llvm.ValueRef, + attr_index: llvm.AttributeIndex, + attr_name: []const u8, +) !void { + const kind_id = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len); + assert(kind_id != 0); + const llvm_attr = llvm.CreateEnumAttribute(ofile.context, kind_id, 0) orelse return error.OutOfMemory; + llvm.AddAttributeAtIndex(val, attr_index, llvm_attr); +} + +fn addLLVMAttrStr( + ofile: *ObjectFile, + val: llvm.ValueRef, + attr_index: llvm.AttributeIndex, + attr_name: []const u8, + attr_val: []const u8, +) !void { + const llvm_attr = llvm.CreateStringAttribute( + ofile.context, + attr_name.ptr, + @intCast(c_uint, attr_name.len), + attr_val.ptr, + @intCast(c_uint, attr_val.len), + ) orelse return error.OutOfMemory; + llvm.AddAttributeAtIndex(val, attr_index, llvm_attr); +} + +fn addLLVMAttrInt( + val: llvm.ValueRef, + attr_index: llvm.AttributeIndex, + attr_name: []const u8, + attr_val: u64, +) !void { + const kind_id = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len); + assert(kind_id != 0); + const llvm_attr = llvm.CreateEnumAttribute(ofile.context, kind_id, attr_val) orelse return error.OutOfMemory; + llvm.AddAttributeAtIndex(val, attr_index, llvm_attr); +} + +fn addLLVMFnAttr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8) !void { + return addLLVMAttr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name); +} + +fn addLLVMFnAttrStr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: 
[]const u8) !void { + return addLLVMAttrStr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val); +} + +fn addLLVMFnAttrInt(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: u64) !void { + return addLLVMAttrInt(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val); } diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig index cbda7861bc..1dbbf21206 100644 --- a/src-self-hosted/compilation.zig +++ b/src-self-hosted/compilation.zig @@ -606,6 +606,10 @@ pub const Compilation = struct { return error.Todo; } + pub fn haveLibC(self: *Compilation) bool { + return self.libc_link_lib != null; + } + pub fn addLinkLib(self: *Compilation, name: []const u8, provided_explicitly: bool) !*LinkLib { const is_libc = mem.eql(u8, name, "c"); @@ -741,7 +745,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void { analyzed_code.dump(); } - // Kick off rendering to LLVM comp, but it doesn't block the fn decl + // Kick off rendering to LLVM module, but it doesn't block the fn decl // analysis from being complete. 
try comp.build_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code); } diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index 22d5a067a7..0e0a4f9bf3 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -10,6 +10,8 @@ const assert = std.debug.assert; const Token = std.zig.Token; const ParsedFile = @import("parsed_file.zig").ParsedFile; const Span = @import("errmsg.zig").Span; +const llvm = @import("llvm.zig"); +const ObjectFile = @import("codegen.zig").ObjectFile; pub const LVal = enum { None, @@ -61,6 +63,9 @@ pub const Instruction = struct { /// the instruction that this one derives from in analysis parent: ?*Instruction, + /// populated durign codegen + llvm_value: ?llvm.ValueRef, + pub fn cast(base: *Instruction, comptime T: type) ?*T { if (base.id == comptime typeToId(T)) { return @fieldParentPtr(T, "base", base); @@ -108,14 +113,25 @@ pub const Instruction = struct { inline while (i < @memberCount(Id)) : (i += 1) { if (base.id == @field(Id, @memberName(Id, i))) { const T = @field(Instruction, @memberName(Id, i)); - const new_inst = try @fieldParentPtr(T, "base", base).analyze(ira); - new_inst.linkToParent(base); - return new_inst; + return @fieldParentPtr(T, "base", base).analyze(ira); } } unreachable; } + pub fn render(base: *Instruction, ofile: *ObjectFile, fn_val: *Value.Fn) (error{OutOfMemory}!?llvm.ValueRef) { + switch (base.id) { + Id.Return => return @fieldParentPtr(Return, "base", base).render(ofile, fn_val), + Id.Const => return @fieldParentPtr(Const, "base", base).render(ofile, fn_val), + Id.Ref => @panic("TODO"), + Id.DeclVar => @panic("TODO"), + Id.CheckVoidStmt => @panic("TODO"), + Id.Phi => @panic("TODO"), + Id.Br => @panic("TODO"), + Id.AddImplicitReturnType => @panic("TODO"), + } + } + fn getAsParam(param: *Instruction) !*Instruction { const child = param.child orelse return error.SemanticAnalysisFailed; switch (child.val) { @@ -186,6 +202,10 @@ pub const Instruction = struct { new_inst.val = IrVal{ 
.KnownValue = self.base.val.KnownValue.getRef() }; return new_inst; } + + pub fn render(self: *Const, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef { + return self.base.val.KnownValue.getLlvmConst(ofile); + } }; pub const Return = struct { @@ -214,6 +234,18 @@ pub const Instruction = struct { return ira.irb.build(Return, self.base.scope, self.base.span, Params{ .return_value = casted_value }); } + + pub fn render(self: *Return, ofile: *ObjectFile, fn_val: *Value.Fn) ?llvm.ValueRef { + const value = self.params.return_value.llvm_value; + const return_type = self.params.return_value.getKnownType(); + + if (return_type.handleIsPtr()) { + @panic("TODO"); + } else { + _ = llvm.BuildRet(ofile.builder, value); + } + return null; + } }; pub const Ref = struct { @@ -387,12 +419,16 @@ pub const Variable = struct { pub const BasicBlock = struct { ref_count: usize, - name_hint: []const u8, + name_hint: [*]const u8, // must be a C string literal debug_id: usize, scope: *Scope, instruction_list: std.ArrayList(*Instruction), ref_instruction: ?*Instruction, + /// for codegen + llvm_block: llvm.BasicBlockRef, + llvm_exit_block: llvm.BasicBlockRef, + /// the basic block that is derived from this one in analysis child: ?*BasicBlock, @@ -426,7 +462,7 @@ pub const Code = struct { pub fn dump(self: *Code) void { var bb_i: usize = 0; for (self.basic_block_list.toSliceConst()) |bb| { - std.debug.warn("{}_{}:\n", bb.name_hint, bb.debug_id); + std.debug.warn("{s}_{}:\n", bb.name_hint, bb.debug_id); for (bb.instruction_list.toSliceConst()) |instr| { std.debug.warn(" "); instr.dump(); @@ -475,7 +511,7 @@ pub const Builder = struct { } /// No need to clean up resources thanks to the arena allocator. 
- pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: []const u8) !*BasicBlock { + pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: [*]const u8) !*BasicBlock { const basic_block = try self.arena().create(BasicBlock{ .ref_count = 0, .name_hint = name_hint, @@ -485,6 +521,8 @@ pub const Builder = struct { .child = null, .parent = null, .ref_instruction = null, + .llvm_block = undefined, + .llvm_exit_block = undefined, }); self.next_debug_id += 1; return basic_block; @@ -600,7 +638,7 @@ pub const Builder = struct { if (block.label) |label| { block_scope.incoming_values = std.ArrayList(*Instruction).init(irb.arena()); block_scope.incoming_blocks = std.ArrayList(*BasicBlock).init(irb.arena()); - block_scope.end_block = try irb.createBasicBlock(parent_scope, "BlockEnd"); + block_scope.end_block = try irb.createBasicBlock(parent_scope, c"BlockEnd"); block_scope.is_comptime = try irb.buildConstBool( parent_scope, Span.token(block.lbrace), @@ -777,6 +815,7 @@ pub const Builder = struct { .span = span, .child = null, .parent = null, + .llvm_value = undefined, }, .params = params, }); @@ -968,7 +1007,7 @@ pub async fn gen( var irb = try Builder.init(comp, parsed_file); errdefer irb.abort(); - const entry_block = try irb.createBasicBlock(scope, "Entry"); + const entry_block = try irb.createBasicBlock(scope, c"Entry"); entry_block.ref(); // Entry block gets a reference because we enter it to begin. 
try irb.setCursorAtEndAndAppendBlock(entry_block); @@ -1013,6 +1052,7 @@ pub async fn analyze(comp: *Compilation, parsed_file: *ParsedFile, old_code: *Co } const return_inst = try old_instruction.analyze(&ira); + return_inst.linkToParent(old_instruction); // Note: if we ever modify the above to handle error.CompileError by continuing analysis, // then here we want to check if ira.isCompTime() and return early if true diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig index b815f75b05..13480dc2c6 100644 --- a/src-self-hosted/llvm.zig +++ b/src-self-hosted/llvm.zig @@ -2,29 +2,91 @@ const builtin = @import("builtin"); const c = @import("c.zig"); const assert = @import("std").debug.assert; +pub const AttributeIndex = c_uint; +pub const Bool = c_int; + pub const BuilderRef = removeNullability(c.LLVMBuilderRef); pub const ContextRef = removeNullability(c.LLVMContextRef); pub const ModuleRef = removeNullability(c.LLVMModuleRef); pub const ValueRef = removeNullability(c.LLVMValueRef); pub const TypeRef = removeNullability(c.LLVMTypeRef); +pub const BasicBlockRef = removeNullability(c.LLVMBasicBlockRef); +pub const AttributeRef = removeNullability(c.LLVMAttributeRef); +pub const AddAttributeAtIndex = c.LLVMAddAttributeAtIndex; pub const AddFunction = c.LLVMAddFunction; +pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation; +pub const ConstInt = c.LLVMConstInt; +pub const ConstStringInContext = c.LLVMConstStringInContext; +pub const ConstStructInContext = c.LLVMConstStructInContext; pub const CreateBuilderInContext = c.LLVMCreateBuilderInContext; +pub const CreateEnumAttribute = c.LLVMCreateEnumAttribute; +pub const CreateStringAttribute = c.LLVMCreateStringAttribute; pub const DisposeBuilder = c.LLVMDisposeBuilder; pub const DisposeModule = c.LLVMDisposeModule; +pub const DoubleTypeInContext = c.LLVMDoubleTypeInContext; pub const DumpModule = c.LLVMDumpModule; +pub const FP128TypeInContext = c.LLVMFP128TypeInContext; +pub const 
FloatTypeInContext = c.LLVMFloatTypeInContext; +pub const GetEnumAttributeKindForName = c.LLVMGetEnumAttributeKindForName; +pub const GetMDKindIDInContext = c.LLVMGetMDKindIDInContext; +pub const HalfTypeInContext = c.LLVMHalfTypeInContext; +pub const InsertBasicBlockInContext = c.LLVMInsertBasicBlockInContext; +pub const Int128TypeInContext = c.LLVMInt128TypeInContext; +pub const Int16TypeInContext = c.LLVMInt16TypeInContext; +pub const Int1TypeInContext = c.LLVMInt1TypeInContext; +pub const Int32TypeInContext = c.LLVMInt32TypeInContext; +pub const Int64TypeInContext = c.LLVMInt64TypeInContext; +pub const Int8TypeInContext = c.LLVMInt8TypeInContext; +pub const IntPtrTypeForASInContext = c.LLVMIntPtrTypeForASInContext; +pub const IntPtrTypeInContext = c.LLVMIntPtrTypeInContext; +pub const IntTypeInContext = c.LLVMIntTypeInContext; +pub const LabelTypeInContext = c.LLVMLabelTypeInContext; +pub const MDNodeInContext = c.LLVMMDNodeInContext; +pub const MDStringInContext = c.LLVMMDStringInContext; +pub const MetadataTypeInContext = c.LLVMMetadataTypeInContext; pub const ModuleCreateWithNameInContext = c.LLVMModuleCreateWithNameInContext; +pub const PPCFP128TypeInContext = c.LLVMPPCFP128TypeInContext; +pub const StructTypeInContext = c.LLVMStructTypeInContext; +pub const TokenTypeInContext = c.LLVMTokenTypeInContext; pub const VoidTypeInContext = c.LLVMVoidTypeInContext; +pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext; +pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext; +pub const ConstAllOnes = c.LLVMConstAllOnes; +pub const ConstNull = c.LLVMConstNull; + +pub const VerifyModule = LLVMVerifyModule; +extern fn LLVMVerifyModule(M: ModuleRef, Action: VerifierFailureAction, OutMessage: *?[*]u8) Bool; + +pub const GetInsertBlock = LLVMGetInsertBlock; +extern fn LLVMGetInsertBlock(Builder: BuilderRef) BasicBlockRef; pub const FunctionType = LLVMFunctionType; extern fn LLVMFunctionType( ReturnType: TypeRef, ParamTypes: [*]TypeRef, ParamCount: c_uint, - 
IsVarArg: c_int, + IsVarArg: Bool, ) ?TypeRef; +pub const GetParam = LLVMGetParam; +extern fn LLVMGetParam(Fn: ValueRef, Index: c_uint) ValueRef; + +pub const AppendBasicBlockInContext = LLVMAppendBasicBlockInContext; +extern fn LLVMAppendBasicBlockInContext(C: ContextRef, Fn: ValueRef, Name: [*]const u8) ?BasicBlockRef; + +pub const PositionBuilderAtEnd = LLVMPositionBuilderAtEnd; +extern fn LLVMPositionBuilderAtEnd(Builder: BuilderRef, Block: BasicBlockRef) void; + +pub const AbortProcessAction = VerifierFailureAction.LLVMAbortProcessAction; +pub const PrintMessageAction = VerifierFailureAction.LLVMPrintMessageAction; +pub const ReturnStatusAction = VerifierFailureAction.LLVMReturnStatusAction; +pub const VerifierFailureAction = c.LLVMVerifierFailureAction; + fn removeNullability(comptime T: type) type { comptime assert(@typeId(T) == builtin.TypeId.Optional); return T.Child; } + +pub const BuildRet = LLVMBuildRet; +extern fn LLVMBuildRet(arg0: BuilderRef, V: ?ValueRef) ValueRef; diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig index 6fd6456b12..4326617fa0 100644 --- a/src-self-hosted/scope.zig +++ b/src-self-hosted/scope.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const Allocator = mem.Allocator; const Decl = @import("decl.zig").Decl; const Compilation = @import("compilation.zig").Compilation; @@ -6,6 +7,7 @@ const mem = std.mem; const ast = std.zig.ast; const Value = @import("value.zig").Value; const ir = @import("ir.zig"); +const Span = @import("errmsg.zig").Span; pub const Scope = struct { id: Id, @@ -93,6 +95,35 @@ pub const Scope = struct { end_block: *ir.BasicBlock, is_comptime: *ir.Instruction, + safety: Safety, + + const Safety = union(enum) { + Auto, + Manual: Manual, + + const Manual = struct { + /// the source span that disabled the safety value + span: Span, + + /// whether safety is enabled + enabled: bool, + }; + + fn get(self: Safety, comp: *Compilation) bool { + return switch (self) { + 
Safety.Auto => switch (comp.build_mode) { + builtin.Mode.Debug, + builtin.Mode.ReleaseSafe, + => true, + builtin.Mode.ReleaseFast, + builtin.Mode.ReleaseSmall, + => false, + }, + @TagType(Safety).Manual => |man| man.enabled, + }; + } + }; + /// Creates a Block scope with 1 reference pub fn create(comp: *Compilation, parent: ?*Scope) !*Block { const self = try comp.a().create(Block{ @@ -105,6 +136,7 @@ pub const Scope = struct { .incoming_blocks = undefined, .end_block = undefined, .is_comptime = undefined, + .safety = Safety.Auto, }); errdefer comp.a().destroy(self); diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index 8349047749..670547cce2 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -72,6 +72,81 @@ pub const Type = struct { } } + pub fn handleIsPtr(base: *Type) bool { + switch (base.id) { + Id.Type, + Id.ComptimeFloat, + Id.ComptimeInt, + Id.Undefined, + Id.Null, + Id.Namespace, + Id.Block, + Id.BoundFn, + Id.ArgTuple, + Id.Opaque, + => unreachable, + + Id.NoReturn, + Id.Void, + Id.Bool, + Id.Int, + Id.Float, + Id.Pointer, + Id.ErrorSet, + Id.Enum, + Id.Fn, + Id.Promise, + => return false, + + Id.Struct => @panic("TODO"), + Id.Array => @panic("TODO"), + Id.Optional => @panic("TODO"), + Id.ErrorUnion => @panic("TODO"), + Id.Union => @panic("TODO"), + } + } + + pub fn hasBits(base: *Type) bool { + switch (base.id) { + Id.Type, + Id.ComptimeFloat, + Id.ComptimeInt, + Id.Undefined, + Id.Null, + Id.Namespace, + Id.Block, + Id.BoundFn, + Id.ArgTuple, + Id.Opaque, + => unreachable, + + Id.Void, + Id.NoReturn, + => return false, + + Id.Bool, + Id.Int, + Id.Float, + Id.Fn, + Id.Promise, + => return true, + + Id.ErrorSet => @panic("TODO"), + Id.Enum => @panic("TODO"), + Id.Pointer => @panic("TODO"), + Id.Struct => @panic("TODO"), + Id.Array => @panic("TODO"), + Id.Optional => @panic("TODO"), + Id.ErrorUnion => @panic("TODO"), + Id.Union => @panic("TODO"), + } + } + + pub fn cast(base: *Type, comptime T: type) ?*T { + if 
(base.id != @field(Id, @typeName(T))) return null; + return @fieldParentPtr(T, "base", base); + } + pub fn dump(base: *const Type) void { std.debug.warn("{}", @tagName(base.id)); } diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig index 8c047b1513..e3b91d2807 100644 --- a/src-self-hosted/value.zig +++ b/src-self-hosted/value.zig @@ -2,6 +2,8 @@ const std = @import("std"); const builtin = @import("builtin"); const Scope = @import("scope.zig").Scope; const Compilation = @import("compilation.zig").Compilation; +const ObjectFile = @import("codegen.zig").ObjectFile; +const llvm = @import("llvm.zig"); /// Values are ref-counted, heap-allocated, and copy-on-write /// If there is only 1 ref then write need not copy @@ -39,6 +41,17 @@ pub const Value = struct { std.debug.warn("{}", @tagName(base.id)); } + pub fn getLlvmConst(base: *Value, ofile: *ObjectFile) (error{OutOfMemory}!?llvm.ValueRef) { + switch (base.id) { + Id.Type => unreachable, + Id.Fn => @panic("TODO"), + Id.Void => return null, + Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmConst(ofile), + Id.NoReturn => unreachable, + Id.Ptr => @panic("TODO"), + } + } + pub const Id = enum { Type, Fn, @@ -123,6 +136,15 @@ pub const Value = struct { pub fn destroy(self: *Bool, comp: *Compilation) void { comp.a().destroy(self); } + + pub fn getLlvmConst(self: *Bool, ofile: *ObjectFile) ?llvm.ValueRef { + const llvm_type = llvm.Int1TypeInContext(ofile.context); + if (self.x) { + return llvm.ConstAllOnes(llvm_type); + } else { + return llvm.ConstNull(llvm_type); + } + } }; pub const NoReturn = struct { diff --git a/std/event/future.zig b/std/event/future.zig index 23fa570c8f..0f27b4131b 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -40,6 +40,16 @@ pub fn Future(comptime T: type) type { return &self.data; } + /// Gets the data without waiting for it. If it's available, a pointer is + /// returned. Otherwise, null is returned. 
+ pub fn getOrNull(self: *Self) ?*T { + if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1) { + return &self.data; + } else { + return null; + } + } + /// Make the data become available. May be called only once. /// Before calling this, modify the `data` property. pub fn resolve(self: *Self) void { -- cgit v1.2.3 From 97bfeac13f89e1b5a22fcd7d4705341b4c3e1950 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 16 Jul 2018 20:52:50 -0400 Subject: self-hosted: create tmp dir for .o files and emit .o file for fn --- CMakeLists.txt | 1 + src-self-hosted/codegen.zig | 86 ++++++++++- src-self-hosted/compilation.zig | 335 ++++++++++++++++++++++++++++++++-------- src-self-hosted/ir.zig | 10 +- src-self-hosted/llvm.zig | 75 ++++++++- src-self-hosted/main.zig | 8 +- src-self-hosted/package.zig | 29 ++++ src-self-hosted/scope.zig | 32 ++-- src-self-hosted/target.zig | 116 ++++++++++---- src-self-hosted/test.zig | 3 +- src-self-hosted/type.zig | 58 +++---- src-self-hosted/value.zig | 41 ++++- src/zig_llvm.cpp | 5 + src/zig_llvm.h | 3 +- std/atomic/int.zig | 4 + std/buffer.zig | 13 ++ std/dwarf.zig | 37 +++++ std/event/future.zig | 39 ++++- std/index.zig | 3 + std/lazy_init.zig | 85 ++++++++++ 20 files changed, 808 insertions(+), 175 deletions(-) create mode 100644 src-self-hosted/package.zig create mode 100644 std/lazy_init.zig (limited to 'std/event/future.zig') diff --git a/CMakeLists.txt b/CMakeLists.txt index e606855555..0e7c1df350 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -479,6 +479,7 @@ set(ZIG_STD_FILES "index.zig" "io.zig" "json.zig" + "lazy_init.zig" "linked_list.zig" "macho.zig" "math/acos.zig" diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 698f1e5b45..28ba2a1564 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -1,19 +1,22 @@ const std = @import("std"); +const builtin = @import("builtin"); const Compilation = @import("compilation.zig").Compilation; -// we go through llvm instead of c 
for 2 reasons: -// 1. to avoid accidentally calling the non-thread-safe functions -// 2. patch up some of the types to remove nullability const llvm = @import("llvm.zig"); +const c = @import("c.zig"); const ir = @import("ir.zig"); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const event = std.event; const assert = std.debug.assert; +const DW = std.dwarf; pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) !void { fn_val.base.ref(); defer fn_val.base.deref(comp); - defer code.destroy(comp.a()); + defer code.destroy(comp.gpa()); + + var output_path = try await (async comp.createRandomOutputPath(comp.target.oFileExt()) catch unreachable); + errdefer output_path.deinit(); const llvm_handle = try comp.event_loop_local.getAnyLlvmContext(); defer llvm_handle.release(comp.event_loop_local); @@ -23,13 +26,56 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) const module = llvm.ModuleCreateWithNameInContext(comp.name.ptr(), context) orelse return error.OutOfMemory; defer llvm.DisposeModule(module); + llvm.SetTarget(module, comp.llvm_triple.ptr()); + llvm.SetDataLayout(module, comp.target_layout_str); + + if (comp.target.getObjectFormat() == builtin.ObjectFormat.coff) { + llvm.AddModuleCodeViewFlag(module); + } else { + llvm.AddModuleDebugInfoFlag(module); + } + const builder = llvm.CreateBuilderInContext(context) orelse return error.OutOfMemory; defer llvm.DisposeBuilder(builder); + const dibuilder = llvm.CreateDIBuilder(module, true) orelse return error.OutOfMemory; + defer llvm.DisposeDIBuilder(dibuilder); + + // Don't use ZIG_VERSION_STRING here. LLVM misparses it when it includes + // the git revision. 
+ const producer = try std.Buffer.allocPrint( + &code.arena.allocator, + "zig {}.{}.{}", + u32(c.ZIG_VERSION_MAJOR), + u32(c.ZIG_VERSION_MINOR), + u32(c.ZIG_VERSION_PATCH), + ); + const flags = c""; + const runtime_version = 0; + const compile_unit_file = llvm.CreateFile( + dibuilder, + comp.name.ptr(), + comp.root_package.root_src_dir.ptr(), + ) orelse return error.OutOfMemory; + const is_optimized = comp.build_mode != builtin.Mode.Debug; + const compile_unit = llvm.CreateCompileUnit( + dibuilder, + DW.LANG_C99, + compile_unit_file, + producer.ptr(), + is_optimized, + flags, + runtime_version, + c"", + 0, + !comp.strip, + ) orelse return error.OutOfMemory; + var ofile = ObjectFile{ .comp = comp, .module = module, .builder = builder, + .dibuilder = dibuilder, .context = context, .lock = event.Lock.init(comp.loop), }; @@ -41,8 +87,7 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) // LLVMSetModuleInlineAsm(g->module, buf_ptr(&g->global_asm)); //} - // TODO - //ZigLLVMDIBuilderFinalize(g->dbuilder); + llvm.DIBuilderFinalize(dibuilder); if (comp.verbose_llvm_ir) { llvm.DumpModule(ofile.module); @@ -53,17 +98,42 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) var error_ptr: ?[*]u8 = null; _ = llvm.VerifyModule(ofile.module, llvm.AbortProcessAction, &error_ptr); } + + assert(comp.emit_file_type == Compilation.Emit.Binary); // TODO support other types + + const is_small = comp.build_mode == builtin.Mode.ReleaseSmall; + const is_debug = comp.build_mode == builtin.Mode.Debug; + + var err_msg: [*]u8 = undefined; + // TODO integrate this with evented I/O + if (llvm.TargetMachineEmitToFile( + comp.target_machine, + module, + output_path.ptr(), + llvm.EmitBinary, + &err_msg, + is_debug, + is_small, + )) { + if (std.debug.runtime_safety) { + std.debug.panic("unable to write object file {}: {s}\n", output_path.toSliceConst(), err_msg); + } + return error.WritingObjectFileFailed; + } + 
//validate_inline_fns(g); TODO + fn_val.containing_object = output_path; } pub const ObjectFile = struct { comp: *Compilation, module: llvm.ModuleRef, builder: llvm.BuilderRef, + dibuilder: *llvm.DIBuilder, context: llvm.ContextRef, lock: event.Lock, - fn a(self: *ObjectFile) *std.mem.Allocator { - return self.comp.a(); + fn gpa(self: *ObjectFile) *std.mem.Allocator { + return self.comp.gpa(); } }; diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig index 1dbbf21206..d5380a0644 100644 --- a/src-self-hosted/compilation.zig +++ b/src-self-hosted/compilation.zig @@ -26,16 +26,31 @@ const Value = @import("value.zig").Value; const Type = Value.Type; const Span = errmsg.Span; const codegen = @import("codegen.zig"); +const Package = @import("package.zig").Package; /// Data that is local to the event loop. pub const EventLoopLocal = struct { loop: *event.Loop, llvm_handle_pool: std.atomic.Stack(llvm.ContextRef), - fn init(loop: *event.Loop) EventLoopLocal { + /// TODO pool these so that it doesn't have to lock + prng: event.Locked(std.rand.DefaultPrng), + + var lazy_init_targets = std.lazyInit(void); + + fn init(loop: *event.Loop) !EventLoopLocal { + lazy_init_targets.get() orelse { + Target.initializeAll(); + lazy_init_targets.resolve(); + }; + + var seed_bytes: [@sizeOf(u64)]u8 = undefined; + try std.os.getRandomBytes(seed_bytes[0..]); + const seed = std.mem.readInt(seed_bytes, u64, builtin.Endian.Big); return EventLoopLocal{ .loop = loop, .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(), + .prng = event.Locked(std.rand.DefaultPrng).init(loop, std.rand.DefaultPrng.init(seed)), }; } @@ -76,10 +91,16 @@ pub const Compilation = struct { event_loop_local: *EventLoopLocal, loop: *event.Loop, name: Buffer, + llvm_triple: Buffer, root_src_path: ?[]const u8, target: Target, + llvm_target: llvm.TargetRef, build_mode: builtin.Mode, zig_lib_dir: []const u8, + zig_std_dir: []const u8, + + /// lazily created when we need it + tmp_dir: 
event.Future(BuildError![]u8), version_major: u32, version_minor: u32, @@ -106,8 +127,16 @@ pub const Compilation = struct { lib_dirs: []const []const u8, rpath_list: []const []const u8, assembly_files: []const []const u8, + + /// paths that are explicitly provided by the user to link against link_objects: []const []const u8, + /// functions that have their own objects that we need to link + /// it uses an optional pointer so that tombstone removals are possible + fn_link_set: event.Locked(FnLinkSet), + + pub const FnLinkSet = std.LinkedList(?*Value.Fn); + windows_subsystem_windows: bool, windows_subsystem_console: bool, @@ -141,7 +170,7 @@ pub const Compilation = struct { /// Before code generation starts, must wait on this group to make sure /// the build is complete. - build_group: event.Group(BuildError!void), + prelink_group: event.Group(BuildError!void), compile_errors: event.Locked(CompileErrList), @@ -155,6 +184,16 @@ pub const Compilation = struct { false_value: *Value.Bool, noreturn_value: *Value.NoReturn, + target_machine: llvm.TargetMachineRef, + target_data_ref: llvm.TargetDataRef, + target_layout_str: [*]u8, + + /// for allocating things which have the same lifetime as this Compilation + arena_allocator: std.heap.ArenaAllocator, + + root_package: *Package, + std_package: *Package, + const CompileErrList = std.ArrayList(*errmsg.Msg); // TODO handle some of these earlier and report them in a way other than error codes @@ -195,6 +234,9 @@ pub const Compilation = struct { BufferTooSmall, Unimplemented, // TODO remove this one SemanticAnalysisFailed, // TODO remove this one + ReadOnlyFileSystem, + LinkQuotaExceeded, + EnvironmentVariableNotFound, }; pub const Event = union(enum) { @@ -234,31 +276,31 @@ pub const Compilation = struct { event_loop_local: *EventLoopLocal, name: []const u8, root_src_path: ?[]const u8, - target: *const Target, + target: Target, kind: Kind, build_mode: builtin.Mode, + is_static: bool, zig_lib_dir: []const u8, cache_dir: []const 
u8, ) !*Compilation { const loop = event_loop_local.loop; - - var name_buffer = try Buffer.init(loop.allocator, name); - errdefer name_buffer.deinit(); - - const events = try event.Channel(Event).create(loop, 0); - errdefer events.destroy(); - - const comp = try loop.allocator.create(Compilation{ + const comp = try event_loop_local.loop.allocator.create(Compilation{ .loop = loop, + .arena_allocator = std.heap.ArenaAllocator.init(loop.allocator), .event_loop_local = event_loop_local, - .events = events, - .name = name_buffer, + .events = undefined, .root_src_path = root_src_path, - .target = target.*, + .target = target, + .llvm_target = undefined, .kind = kind, .build_mode = build_mode, .zig_lib_dir = zig_lib_dir, + .zig_std_dir = undefined, .cache_dir = cache_dir, + .tmp_dir = event.Future(BuildError![]u8).init(loop), + + .name = undefined, + .llvm_triple = undefined, .version_major = 0, .version_minor = 0, @@ -283,7 +325,7 @@ pub const Compilation = struct { .is_test = false, .each_lib_rpath = false, .strip = false, - .is_static = false, + .is_static = is_static, .linker_rdynamic = false, .clang_argv = [][]const u8{}, .llvm_argv = [][]const u8{}, @@ -291,9 +333,10 @@ pub const Compilation = struct { .rpath_list = [][]const u8{}, .assembly_files = [][]const u8{}, .link_objects = [][]const u8{}, + .fn_link_set = event.Locked(FnLinkSet).init(loop, FnLinkSet.init()), .windows_subsystem_windows = false, .windows_subsystem_console = false, - .link_libs_list = ArrayList(*LinkLib).init(loop.allocator), + .link_libs_list = undefined, .libc_link_lib = null, .err_color = errmsg.Color.Auto, .darwin_frameworks = [][]const u8{}, @@ -303,7 +346,7 @@ pub const Compilation = struct { .emit_file_type = Emit.Binary, .link_out_file = null, .exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)), - .build_group = event.Group(BuildError!void).init(loop), + .prelink_group = event.Group(BuildError!void).init(loop), .compile_errors = 
event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)), .meta_type = undefined, @@ -314,13 +357,82 @@ pub const Compilation = struct { .false_value = undefined, .noreturn_type = undefined, .noreturn_value = undefined, + + .target_machine = undefined, + .target_data_ref = undefined, + .target_layout_str = undefined, + + .root_package = undefined, + .std_package = undefined, }); + errdefer { + comp.arena_allocator.deinit(); + comp.loop.allocator.destroy(comp); + } + + comp.name = try Buffer.init(comp.arena(), name); + comp.llvm_triple = try target.getTriple(comp.arena()); + comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple); + comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena()); + comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std"); + + const opt_level = switch (build_mode) { + builtin.Mode.Debug => llvm.CodeGenLevelNone, + else => llvm.CodeGenLevelAggressive, + }; + + const reloc_mode = if (is_static) llvm.RelocStatic else llvm.RelocPIC; + + // LLVM creates invalid binaries on Windows sometimes. + // See https://github.com/ziglang/zig/issues/508 + // As a workaround we do not use target native features on Windows. 
+ var target_specific_cpu_args: ?[*]u8 = null; + var target_specific_cpu_features: ?[*]u8 = null; + errdefer llvm.DisposeMessage(target_specific_cpu_args); + errdefer llvm.DisposeMessage(target_specific_cpu_features); + if (target == Target.Native and !target.isWindows()) { + target_specific_cpu_args = llvm.GetHostCPUName() orelse return error.OutOfMemory; + target_specific_cpu_features = llvm.GetNativeFeatures() orelse return error.OutOfMemory; + } + + comp.target_machine = llvm.CreateTargetMachine( + comp.llvm_target, + comp.llvm_triple.ptr(), + target_specific_cpu_args orelse c"", + target_specific_cpu_features orelse c"", + opt_level, + reloc_mode, + llvm.CodeModelDefault, + ) orelse return error.OutOfMemory; + errdefer llvm.DisposeTargetMachine(comp.target_machine); + + comp.target_data_ref = llvm.CreateTargetDataLayout(comp.target_machine) orelse return error.OutOfMemory; + errdefer llvm.DisposeTargetData(comp.target_data_ref); + + comp.target_layout_str = llvm.CopyStringRepOfTargetData(comp.target_data_ref) orelse return error.OutOfMemory; + errdefer llvm.DisposeMessage(comp.target_layout_str); + + comp.events = try event.Channel(Event).create(comp.loop, 0); + errdefer comp.events.destroy(); + + if (root_src_path) |root_src| { + const dirname = std.os.path.dirname(root_src) orelse "."; + const basename = std.os.path.basename(root_src); + + comp.root_package = try Package.create(comp.arena(), dirname, basename); + comp.std_package = try Package.create(comp.arena(), comp.zig_std_dir, "index.zig"); + try comp.root_package.add("std", comp.std_package); + } else { + comp.root_package = try Package.create(comp.arena(), ".", ""); + } + try comp.initTypes(); + return comp; } fn initTypes(comp: *Compilation) !void { - comp.meta_type = try comp.a().create(Type.MetaType{ + comp.meta_type = try comp.gpa().create(Type.MetaType{ .base = Type{ .base = Value{ .id = Value.Id.Type, @@ -333,9 +445,9 @@ pub const Compilation = struct { }); comp.meta_type.value = 
&comp.meta_type.base; comp.meta_type.base.base.typeof = &comp.meta_type.base; - errdefer comp.a().destroy(comp.meta_type); + errdefer comp.gpa().destroy(comp.meta_type); - comp.void_type = try comp.a().create(Type.Void{ + comp.void_type = try comp.gpa().create(Type.Void{ .base = Type{ .base = Value{ .id = Value.Id.Type, @@ -345,9 +457,9 @@ pub const Compilation = struct { .id = builtin.TypeId.Void, }, }); - errdefer comp.a().destroy(comp.void_type); + errdefer comp.gpa().destroy(comp.void_type); - comp.noreturn_type = try comp.a().create(Type.NoReturn{ + comp.noreturn_type = try comp.gpa().create(Type.NoReturn{ .base = Type{ .base = Value{ .id = Value.Id.Type, @@ -357,9 +469,9 @@ pub const Compilation = struct { .id = builtin.TypeId.NoReturn, }, }); - errdefer comp.a().destroy(comp.noreturn_type); + errdefer comp.gpa().destroy(comp.noreturn_type); - comp.bool_type = try comp.a().create(Type.Bool{ + comp.bool_type = try comp.gpa().create(Type.Bool{ .base = Type{ .base = Value{ .id = Value.Id.Type, @@ -369,18 +481,18 @@ pub const Compilation = struct { .id = builtin.TypeId.Bool, }, }); - errdefer comp.a().destroy(comp.bool_type); + errdefer comp.gpa().destroy(comp.bool_type); - comp.void_value = try comp.a().create(Value.Void{ + comp.void_value = try comp.gpa().create(Value.Void{ .base = Value{ .id = Value.Id.Void, .typeof = &Type.Void.get(comp).base, .ref_count = std.atomic.Int(usize).init(1), }, }); - errdefer comp.a().destroy(comp.void_value); + errdefer comp.gpa().destroy(comp.void_value); - comp.true_value = try comp.a().create(Value.Bool{ + comp.true_value = try comp.gpa().create(Value.Bool{ .base = Value{ .id = Value.Id.Bool, .typeof = &Type.Bool.get(comp).base, @@ -388,9 +500,9 @@ pub const Compilation = struct { }, .x = true, }); - errdefer comp.a().destroy(comp.true_value); + errdefer comp.gpa().destroy(comp.true_value); - comp.false_value = try comp.a().create(Value.Bool{ + comp.false_value = try comp.gpa().create(Value.Bool{ .base = Value{ .id = 
Value.Id.Bool, .typeof = &Type.Bool.get(comp).base, @@ -398,19 +510,23 @@ pub const Compilation = struct { }, .x = false, }); - errdefer comp.a().destroy(comp.false_value); + errdefer comp.gpa().destroy(comp.false_value); - comp.noreturn_value = try comp.a().create(Value.NoReturn{ + comp.noreturn_value = try comp.gpa().create(Value.NoReturn{ .base = Value{ .id = Value.Id.NoReturn, .typeof = &Type.NoReturn.get(comp).base, .ref_count = std.atomic.Int(usize).init(1), }, }); - errdefer comp.a().destroy(comp.noreturn_value); + errdefer comp.gpa().destroy(comp.noreturn_value); } pub fn destroy(self: *Compilation) void { + if (self.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| { + os.deleteTree(self.arena(), tmp_dir) catch {}; + } else |_| {}; + self.noreturn_value.base.deref(self); self.void_value.base.deref(self); self.false_value.base.deref(self); @@ -420,14 +536,18 @@ pub const Compilation = struct { self.meta_type.base.base.deref(self); self.events.destroy(); - self.name.deinit(); - self.a().destroy(self); + llvm.DisposeMessage(self.target_layout_str); + llvm.DisposeTargetData(self.target_data_ref); + llvm.DisposeTargetMachine(self.target_machine); + + self.arena_allocator.deinit(); + self.gpa().destroy(self); } pub fn build(self: *Compilation) !void { if (self.llvm_argv.len != 0) { - var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{ + var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.arena(), [][]const []const u8{ [][]const u8{"zig (LLVM option parsing)"}, self.llvm_argv, }); @@ -436,7 +556,7 @@ pub const Compilation = struct { c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr); } - _ = try async self.buildAsync(); + _ = try async self.buildAsync(); } async fn buildAsync(self: *Compilation) void { @@ -464,7 +584,7 @@ pub const Compilation = struct { } } else |err| { // if there's an error then the compile errors have dangling references - 
self.a().free(compile_errors); + self.gpa().free(compile_errors); await (async self.events.put(Event{ .Error = err }) catch unreachable); } @@ -477,26 +597,26 @@ pub const Compilation = struct { async fn addRootSrc(self: *Compilation) !void { const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path"); // TODO async/await os.path.real - const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| { + const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| { try printError("unable to get real path '{}': {}", root_src_path, err); return err; }; - errdefer self.a().free(root_src_real_path); + errdefer self.gpa().free(root_src_real_path); // TODO async/await readFileAlloc() - const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| { + const source_code = io.readFileAlloc(self.gpa(), root_src_real_path) catch |err| { try printError("unable to open '{}': {}", root_src_real_path, err); return err; }; - errdefer self.a().free(source_code); + errdefer self.gpa().free(source_code); - const parsed_file = try self.a().create(ParsedFile{ + const parsed_file = try self.gpa().create(ParsedFile{ .tree = undefined, .realpath = root_src_real_path, }); - errdefer self.a().destroy(parsed_file); + errdefer self.gpa().destroy(parsed_file); - parsed_file.tree = try std.zig.parse(self.a(), source_code); + parsed_file.tree = try std.zig.parse(self.gpa(), source_code); errdefer parsed_file.tree.deinit(); const tree = &parsed_file.tree; @@ -525,7 +645,7 @@ pub const Compilation = struct { continue; }; - const fn_decl = try self.a().create(Decl.Fn{ + const fn_decl = try self.gpa().create(Decl.Fn{ .base = Decl{ .id = Decl.Id.Fn, .name = name, @@ -538,7 +658,7 @@ pub const Compilation = struct { .value = Decl.Fn.Val{ .Unresolved = {} }, .fn_proto = fn_proto, }); - errdefer self.a().destroy(fn_decl); + errdefer self.gpa().destroy(fn_decl); try decl_group.call(addTopLevelDecl, self, &fn_decl.base); }, @@ 
-547,15 +667,15 @@ pub const Compilation = struct { } } try await (async decl_group.wait() catch unreachable); - try await (async self.build_group.wait() catch unreachable); + try await (async self.prelink_group.wait() catch unreachable); } async fn addTopLevelDecl(self: *Compilation, decl: *Decl) !void { const is_export = decl.isExported(&decl.parsed_file.tree); if (is_export) { - try self.build_group.call(verifyUniqueSymbol, self, decl); - try self.build_group.call(resolveDecl, self, decl); + try self.prelink_group.call(verifyUniqueSymbol, self, decl); + try self.prelink_group.call(resolveDecl, self, decl); } } @@ -563,7 +683,7 @@ pub const Compilation = struct { const text = try std.fmt.allocPrint(self.loop.allocator, fmt, args); errdefer self.loop.allocator.free(text); - try self.build_group.call(addCompileErrorAsync, self, parsed_file, span, text); + try self.prelink_group.call(addCompileErrorAsync, self, parsed_file, span, text); } async fn addCompileErrorAsync( @@ -625,11 +745,11 @@ pub const Compilation = struct { } } - const link_lib = try self.a().create(LinkLib{ + const link_lib = try self.gpa().create(LinkLib{ .name = name, .path = null, .provided_explicitly = provided_explicitly, - .symbols = ArrayList([]u8).init(self.a()), + .symbols = ArrayList([]u8).init(self.gpa()), }); try self.link_libs_list.append(link_lib); if (is_libc) { @@ -638,9 +758,71 @@ pub const Compilation = struct { return link_lib; } - fn a(self: Compilation) *mem.Allocator { + /// General Purpose Allocator. Must free when done. + fn gpa(self: Compilation) *mem.Allocator { return self.loop.allocator; } + + /// Arena Allocator. Automatically freed when the Compilation is destroyed. + fn arena(self: *Compilation) *mem.Allocator { + return &self.arena_allocator.allocator; + } + + /// If the temporary directory for this compilation has not been created, it creates it. + /// Then it creates a random file name in that dir and returns it. 
+ pub async fn createRandomOutputPath(self: *Compilation, suffix: []const u8) !Buffer { + const tmp_dir = try await (async self.getTmpDir() catch unreachable); + const file_prefix = await (async self.getRandomFileName() catch unreachable); + + const file_name = try std.fmt.allocPrint(self.gpa(), "{}{}", file_prefix[0..], suffix); + defer self.gpa().free(file_name); + + const full_path = try os.path.join(self.gpa(), tmp_dir, file_name[0..]); + errdefer self.gpa().free(full_path); + + return Buffer.fromOwnedSlice(self.gpa(), full_path); + } + + /// If the temporary directory for this Compilation has not been created, creates it. + /// Then returns it. The directory is unique to this Compilation and cleaned up when + /// the Compilation deinitializes. + async fn getTmpDir(self: *Compilation) ![]const u8 { + if (await (async self.tmp_dir.start() catch unreachable)) |ptr| return ptr.*; + self.tmp_dir.data = await (async self.getTmpDirImpl() catch unreachable); + self.tmp_dir.resolve(); + return self.tmp_dir.data; + } + + async fn getTmpDirImpl(self: *Compilation) ![]u8 { + const comp_dir_name = await (async self.getRandomFileName() catch unreachable); + const zig_dir_path = try getZigDir(self.gpa()); + defer self.gpa().free(zig_dir_path); + + const tmp_dir = try os.path.join(self.arena(), zig_dir_path, comp_dir_name[0..]); + try os.makePath(self.gpa(), tmp_dir); + return tmp_dir; + } + + async fn getRandomFileName(self: *Compilation) [12]u8 { + // here we replace the standard +/ with -_ so that it can be used in a file name + const b64_fs_encoder = std.base64.Base64Encoder.init( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", + std.base64.standard_pad_char, + ); + + var rand_bytes: [9]u8 = undefined; + + { + const held = await (async self.event_loop_local.prng.acquire() catch unreachable); + defer held.release(); + + held.value.random.bytes(rand_bytes[0..]); + } + + var result: [12]u8 = undefined; + b64_fs_encoder.encode(result[0..], rand_bytes); + 
return result; + } }; fn printError(comptime format: []const u8, args: ...) !void { @@ -662,13 +844,11 @@ fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib /// This declaration has been blessed as going into the final code generation. pub async fn resolveDecl(comp: *Compilation, decl: *Decl) !void { - if (@atomicRmw(u8, &decl.resolution_in_progress, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) { - decl.resolution.data = await (async generateDecl(comp, decl) catch unreachable); - decl.resolution.resolve(); - return decl.resolution.data; - } else { - return (await (async decl.resolution.get() catch unreachable)).*; - } + if (await (async decl.resolution.start() catch unreachable)) |ptr| return ptr.*; + + decl.resolution.data = await (async generateDecl(comp, decl) catch unreachable); + decl.resolution.resolve(); + return decl.resolution.data; } /// The function that actually does the generation. @@ -698,7 +878,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void { const fn_type = try Type.Fn.create(comp, return_type, params, is_var_args); defer fn_type.base.base.deref(comp); - var symbol_name = try std.Buffer.init(comp.a(), fn_decl.base.name); + var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name); errdefer symbol_name.deinit(); const fn_val = try Value.Fn.create(comp, fn_type, fndef_scope, symbol_name); @@ -719,7 +899,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void { error.SemanticAnalysisFailed => return {}, else => return err, }; - defer unanalyzed_code.destroy(comp.a()); + defer unanalyzed_code.destroy(comp.gpa()); if (comp.verbose_ir) { std.debug.warn("unanalyzed:\n"); @@ -738,7 +918,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void { error.SemanticAnalysisFailed => return {}, else => return err, }; - errdefer analyzed_code.destroy(comp.a()); + errdefer analyzed_code.destroy(comp.gpa()); if (comp.verbose_ir) { std.debug.warn("analyzed:\n"); @@ 
-747,5 +927,30 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void { // Kick off rendering to LLVM module, but it doesn't block the fn decl // analysis from being complete. - try comp.build_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code); + try comp.prelink_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code); + try comp.prelink_group.call(addFnToLinkSet, comp, fn_val); +} + +async fn addFnToLinkSet(comp: *Compilation, fn_val: *Value.Fn) void { + fn_val.base.ref(); + defer fn_val.base.deref(comp); + + fn_val.link_set_node.data = fn_val; + + const held = await (async comp.fn_link_set.acquire() catch unreachable); + defer held.release(); + + held.value.append(fn_val.link_set_node); +} + +fn getZigDir(allocator: *mem.Allocator) ![]u8 { + const home_dir = try getHomeDir(allocator); + defer allocator.free(home_dir); + + return os.path.join(allocator, home_dir, ".zig"); +} + +/// TODO move to zig std lib, and make it work for other OSes +fn getHomeDir(allocator: *mem.Allocator) ![]u8 { + return os.getEnvVarOwned(allocator, "HOME"); } diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index 0e0a4f9bf3..c1f9c97001 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -453,7 +453,7 @@ pub const Code = struct { arena: std.heap.ArenaAllocator, return_type: ?*Type, - /// allocator is comp.a() + /// allocator is comp.gpa() pub fn destroy(self: *Code, allocator: *Allocator) void { self.arena.deinit(); allocator.destroy(self); @@ -483,13 +483,13 @@ pub const Builder = struct { pub const Error = Analyze.Error; pub fn init(comp: *Compilation, parsed_file: *ParsedFile) !Builder { - const code = try comp.a().create(Code{ + const code = try comp.gpa().create(Code{ .basic_block_list = undefined, - .arena = std.heap.ArenaAllocator.init(comp.a()), + .arena = std.heap.ArenaAllocator.init(comp.gpa()), .return_type = null, }); code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator); - errdefer 
code.destroy(comp.a()); + errdefer code.destroy(comp.gpa()); return Builder{ .comp = comp, @@ -502,7 +502,7 @@ pub const Builder = struct { } pub fn abort(self: *Builder) void { - self.code.destroy(self.comp.a()); + self.code.destroy(self.comp.gpa()); } /// Call code.destroy() when done diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig index 13480dc2c6..b196656367 100644 --- a/src-self-hosted/llvm.zig +++ b/src-self-hosted/llvm.zig @@ -2,6 +2,12 @@ const builtin = @import("builtin"); const c = @import("c.zig"); const assert = @import("std").debug.assert; +// we wrap the c module for 3 reasons: +// 1. to avoid accidentally calling the non-thread-safe functions +// 2. patch up some of the types to remove nullability +// 3. some functions have been augmented by zig_llvm.cpp to be more powerful, +// such as ZigLLVMTargetMachineEmitToFile + pub const AttributeIndex = c_uint; pub const Bool = c_int; @@ -12,25 +18,51 @@ pub const ValueRef = removeNullability(c.LLVMValueRef); pub const TypeRef = removeNullability(c.LLVMTypeRef); pub const BasicBlockRef = removeNullability(c.LLVMBasicBlockRef); pub const AttributeRef = removeNullability(c.LLVMAttributeRef); +pub const TargetRef = removeNullability(c.LLVMTargetRef); +pub const TargetMachineRef = removeNullability(c.LLVMTargetMachineRef); +pub const TargetDataRef = removeNullability(c.LLVMTargetDataRef); +pub const DIBuilder = c.ZigLLVMDIBuilder; pub const AddAttributeAtIndex = c.LLVMAddAttributeAtIndex; pub const AddFunction = c.LLVMAddFunction; +pub const AddModuleCodeViewFlag = c.ZigLLVMAddModuleCodeViewFlag; +pub const AddModuleDebugInfoFlag = c.ZigLLVMAddModuleDebugInfoFlag; pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation; +pub const ConstAllOnes = c.LLVMConstAllOnes; pub const ConstInt = c.LLVMConstInt; +pub const ConstNull = c.LLVMConstNull; pub const ConstStringInContext = c.LLVMConstStringInContext; pub const ConstStructInContext = c.LLVMConstStructInContext; +pub const 
CopyStringRepOfTargetData = c.LLVMCopyStringRepOfTargetData; pub const CreateBuilderInContext = c.LLVMCreateBuilderInContext; +pub const CreateCompileUnit = c.ZigLLVMCreateCompileUnit; +pub const CreateDIBuilder = c.ZigLLVMCreateDIBuilder; pub const CreateEnumAttribute = c.LLVMCreateEnumAttribute; +pub const CreateFile = c.ZigLLVMCreateFile; pub const CreateStringAttribute = c.LLVMCreateStringAttribute; +pub const CreateTargetDataLayout = c.LLVMCreateTargetDataLayout; +pub const CreateTargetMachine = c.LLVMCreateTargetMachine; +pub const DIBuilderFinalize = c.ZigLLVMDIBuilderFinalize; pub const DisposeBuilder = c.LLVMDisposeBuilder; +pub const DisposeDIBuilder = c.ZigLLVMDisposeDIBuilder; +pub const DisposeMessage = c.LLVMDisposeMessage; pub const DisposeModule = c.LLVMDisposeModule; +pub const DisposeTargetData = c.LLVMDisposeTargetData; +pub const DisposeTargetMachine = c.LLVMDisposeTargetMachine; pub const DoubleTypeInContext = c.LLVMDoubleTypeInContext; pub const DumpModule = c.LLVMDumpModule; pub const FP128TypeInContext = c.LLVMFP128TypeInContext; pub const FloatTypeInContext = c.LLVMFloatTypeInContext; pub const GetEnumAttributeKindForName = c.LLVMGetEnumAttributeKindForName; +pub const GetHostCPUName = c.ZigLLVMGetHostCPUName; pub const GetMDKindIDInContext = c.LLVMGetMDKindIDInContext; +pub const GetNativeFeatures = c.ZigLLVMGetNativeFeatures; pub const HalfTypeInContext = c.LLVMHalfTypeInContext; +pub const InitializeAllAsmParsers = c.LLVMInitializeAllAsmParsers; +pub const InitializeAllAsmPrinters = c.LLVMInitializeAllAsmPrinters; +pub const InitializeAllTargetInfos = c.LLVMInitializeAllTargetInfos; +pub const InitializeAllTargetMCs = c.LLVMInitializeAllTargetMCs; +pub const InitializeAllTargets = c.LLVMInitializeAllTargets; pub const InsertBasicBlockInContext = c.LLVMInsertBasicBlockInContext; pub const Int128TypeInContext = c.LLVMInt128TypeInContext; pub const Int16TypeInContext = c.LLVMInt16TypeInContext; @@ -47,13 +79,16 @@ pub const 
MDStringInContext = c.LLVMMDStringInContext; pub const MetadataTypeInContext = c.LLVMMetadataTypeInContext; pub const ModuleCreateWithNameInContext = c.LLVMModuleCreateWithNameInContext; pub const PPCFP128TypeInContext = c.LLVMPPCFP128TypeInContext; +pub const SetDataLayout = c.LLVMSetDataLayout; +pub const SetTarget = c.LLVMSetTarget; pub const StructTypeInContext = c.LLVMStructTypeInContext; pub const TokenTypeInContext = c.LLVMTokenTypeInContext; pub const VoidTypeInContext = c.LLVMVoidTypeInContext; pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext; pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext; -pub const ConstAllOnes = c.LLVMConstAllOnes; -pub const ConstNull = c.LLVMConstNull; + +pub const GetTargetFromTriple = LLVMGetTargetFromTriple; +extern fn LLVMGetTargetFromTriple(Triple: [*]const u8, T: *TargetRef, ErrorMessage: ?*[*]u8) Bool; pub const VerifyModule = LLVMVerifyModule; extern fn LLVMVerifyModule(M: ModuleRef, Action: VerifierFailureAction, OutMessage: *?[*]u8) Bool; @@ -83,6 +118,31 @@ pub const PrintMessageAction = VerifierFailureAction.LLVMPrintMessageAction; pub const ReturnStatusAction = VerifierFailureAction.LLVMReturnStatusAction; pub const VerifierFailureAction = c.LLVMVerifierFailureAction; +pub const CodeGenLevelNone = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelNone; +pub const CodeGenLevelLess = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelLess; +pub const CodeGenLevelDefault = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelDefault; +pub const CodeGenLevelAggressive = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelAggressive; +pub const CodeGenOptLevel = c.LLVMCodeGenOptLevel; + +pub const RelocDefault = c.LLVMRelocMode.LLVMRelocDefault; +pub const RelocStatic = c.LLVMRelocMode.LLVMRelocStatic; +pub const RelocPIC = c.LLVMRelocMode.LLVMRelocPIC; +pub const RelocDynamicNoPic = c.LLVMRelocMode.LLVMRelocDynamicNoPic; +pub const RelocMode = c.LLVMRelocMode; + +pub const CodeModelDefault = c.LLVMCodeModel.LLVMCodeModelDefault; +pub const 
CodeModelJITDefault = c.LLVMCodeModel.LLVMCodeModelJITDefault; +pub const CodeModelSmall = c.LLVMCodeModel.LLVMCodeModelSmall; +pub const CodeModelKernel = c.LLVMCodeModel.LLVMCodeModelKernel; +pub const CodeModelMedium = c.LLVMCodeModel.LLVMCodeModelMedium; +pub const CodeModelLarge = c.LLVMCodeModel.LLVMCodeModelLarge; +pub const CodeModel = c.LLVMCodeModel; + +pub const EmitAssembly = EmitOutputType.ZigLLVM_EmitAssembly; +pub const EmitBinary = EmitOutputType.ZigLLVM_EmitBinary; +pub const EmitLLVMIr = EmitOutputType.ZigLLVM_EmitLLVMIr; +pub const EmitOutputType = c.ZigLLVM_EmitOutputType; + fn removeNullability(comptime T: type) type { comptime assert(@typeId(T) == builtin.TypeId.Optional); return T.Child; @@ -90,3 +150,14 @@ fn removeNullability(comptime T: type) type { pub const BuildRet = LLVMBuildRet; extern fn LLVMBuildRet(arg0: BuilderRef, V: ?ValueRef) ValueRef; + +pub const TargetMachineEmitToFile = ZigLLVMTargetMachineEmitToFile; +extern fn ZigLLVMTargetMachineEmitToFile( + targ_machine_ref: TargetMachineRef, + module_ref: ModuleRef, + filename: [*]const u8, + output_type: EmitOutputType, + error_message: *[*]u8, + is_debug: bool, + is_small: bool, +) bool; diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index c9478954c5..8b668e35bd 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -363,6 +363,8 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co } }; + const is_static = flags.present("static"); + const assembly_files = flags.many("assembly"); const link_objects = flags.many("object"); if (root_source_file == null and link_objects.len == 0 and assembly_files.len == 0) { @@ -389,7 +391,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co try loop.initMultiThreaded(allocator); defer loop.deinit(); - var event_loop_local = EventLoopLocal.init(&loop); + var event_loop_local = try EventLoopLocal.init(&loop); defer event_loop_local.deinit(); var comp = try 
Compilation.create( @@ -399,6 +401,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co Target.Native, out_type, build_mode, + is_static, zig_lib_dir, full_cache_dir, ); @@ -426,7 +429,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co comp.clang_argv = clang_argv_buf.toSliceConst(); comp.strip = flags.present("strip"); - comp.is_static = flags.present("static"); if (flags.single("libc-lib-dir")) |libc_lib_dir| { comp.libc_lib_dir = libc_lib_dir; @@ -481,9 +483,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co } comp.emit_file_type = emit_type; - comp.link_objects = link_objects; comp.assembly_files = assembly_files; comp.link_out_file = flags.single("out-file"); + comp.link_objects = link_objects; try comp.build(); const process_build_events_handle = try async processBuildEvents(comp, color); diff --git a/src-self-hosted/package.zig b/src-self-hosted/package.zig new file mode 100644 index 0000000000..720b279651 --- /dev/null +++ b/src-self-hosted/package.zig @@ -0,0 +1,29 @@ +const std = @import("std"); +const mem = std.mem; +const assert = std.debug.assert; +const Buffer = std.Buffer; + +pub const Package = struct { + root_src_dir: Buffer, + root_src_path: Buffer, + + /// relative to root_src_dir + table: Table, + + pub const Table = std.HashMap([]const u8, *Package, mem.hash_slice_u8, mem.eql_slice_u8); + + /// makes internal copies of root_src_dir and root_src_path + /// allocator should be an arena allocator because Package never frees anything + pub fn create(allocator: *mem.Allocator, root_src_dir: []const u8, root_src_path: []const u8) !*Package { + return allocator.create(Package{ + .root_src_dir = try Buffer.init(allocator, root_src_dir), + .root_src_path = try Buffer.init(allocator, root_src_path), + .table = Table.init(allocator), + }); + } + + pub fn add(self: *Package, name: []const u8, package: *Package) !void { + const entry = try 
self.table.put(try mem.dupe(self.table.allocator, u8, name), package); + assert(entry == null); + } +}; diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig index 4326617fa0..1c519d6c08 100644 --- a/src-self-hosted/scope.zig +++ b/src-self-hosted/scope.zig @@ -64,7 +64,7 @@ pub const Scope = struct { /// Creates a Decls scope with 1 reference pub fn create(comp: *Compilation, parent: ?*Scope) !*Decls { - const self = try comp.a().create(Decls{ + const self = try comp.gpa().create(Decls{ .base = Scope{ .id = Id.Decls, .parent = parent, @@ -72,9 +72,9 @@ pub const Scope = struct { }, .table = undefined, }); - errdefer comp.a().destroy(self); + errdefer comp.gpa().destroy(self); - self.table = Decl.Table.init(comp.a()); + self.table = Decl.Table.init(comp.gpa()); errdefer self.table.deinit(); if (parent) |p| p.ref(); @@ -126,7 +126,7 @@ pub const Scope = struct { /// Creates a Block scope with 1 reference pub fn create(comp: *Compilation, parent: ?*Scope) !*Block { - const self = try comp.a().create(Block{ + const self = try comp.gpa().create(Block{ .base = Scope{ .id = Id.Block, .parent = parent, @@ -138,14 +138,14 @@ pub const Scope = struct { .is_comptime = undefined, .safety = Safety.Auto, }); - errdefer comp.a().destroy(self); + errdefer comp.gpa().destroy(self); if (parent) |p| p.ref(); return self; } pub fn destroy(self: *Block, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -158,7 +158,7 @@ pub const Scope = struct { /// Creates a FnDef scope with 1 reference /// Must set the fn_val later pub fn create(comp: *Compilation, parent: ?*Scope) !*FnDef { - const self = try comp.a().create(FnDef{ + const self = try comp.gpa().create(FnDef{ .base = Scope{ .id = Id.FnDef, .parent = parent, @@ -173,7 +173,7 @@ pub const Scope = struct { } pub fn destroy(self: *FnDef, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -182,7 +182,7 @@ pub const Scope = struct { /// Creates a 
CompTime scope with 1 reference pub fn create(comp: *Compilation, parent: ?*Scope) !*CompTime { - const self = try comp.a().create(CompTime{ + const self = try comp.gpa().create(CompTime{ .base = Scope{ .id = Id.CompTime, .parent = parent, @@ -195,7 +195,7 @@ pub const Scope = struct { } pub fn destroy(self: *CompTime, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -216,7 +216,7 @@ pub const Scope = struct { kind: Kind, defer_expr_scope: *DeferExpr, ) !*Defer { - const self = try comp.a().create(Defer{ + const self = try comp.gpa().create(Defer{ .base = Scope{ .id = Id.Defer, .parent = parent, @@ -225,7 +225,7 @@ pub const Scope = struct { .defer_expr_scope = defer_expr_scope, .kind = kind, }); - errdefer comp.a().destroy(self); + errdefer comp.gpa().destroy(self); defer_expr_scope.base.ref(); @@ -235,7 +235,7 @@ pub const Scope = struct { pub fn destroy(self: *Defer, comp: *Compilation) void { self.defer_expr_scope.base.deref(comp); - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -245,7 +245,7 @@ pub const Scope = struct { /// Creates a DeferExpr scope with 1 reference pub fn create(comp: *Compilation, parent: ?*Scope, expr_node: *ast.Node) !*DeferExpr { - const self = try comp.a().create(DeferExpr{ + const self = try comp.gpa().create(DeferExpr{ .base = Scope{ .id = Id.DeferExpr, .parent = parent, @@ -253,14 +253,14 @@ pub const Scope = struct { }, .expr_node = expr_node, }); - errdefer comp.a().destroy(self); + errdefer comp.gpa().destroy(self); if (parent) |p| p.ref(); return self; } pub fn destroy(self: *DeferExpr, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; }; diff --git a/src-self-hosted/target.zig b/src-self-hosted/target.zig index 724d99ea23..db673e421a 100644 --- a/src-self-hosted/target.zig +++ b/src-self-hosted/target.zig @@ -1,60 +1,118 @@ +const std = @import("std"); const builtin = @import("builtin"); -const c = @import("c.zig"); - -pub const CrossTarget 
= struct { - arch: builtin.Arch, - os: builtin.Os, - environ: builtin.Environ, -}; +const llvm = @import("llvm.zig"); pub const Target = union(enum) { Native, - Cross: CrossTarget, + Cross: Cross, - pub fn oFileExt(self: *const Target) []const u8 { - const environ = switch (self.*) { - Target.Native => builtin.environ, - Target.Cross => |t| t.environ, - }; - return switch (environ) { - builtin.Environ.msvc => ".obj", + pub const Cross = struct { + arch: builtin.Arch, + os: builtin.Os, + environ: builtin.Environ, + object_format: builtin.ObjectFormat, + }; + + pub fn oFileExt(self: Target) []const u8 { + return switch (self.getObjectFormat()) { + builtin.ObjectFormat.coff => ".obj", else => ".o", }; } - pub fn exeFileExt(self: *const Target) []const u8 { + pub fn exeFileExt(self: Target) []const u8 { return switch (self.getOs()) { builtin.Os.windows => ".exe", else => "", }; } - pub fn getOs(self: *const Target) builtin.Os { - return switch (self.*) { + pub fn getOs(self: Target) builtin.Os { + return switch (self) { Target.Native => builtin.os, - Target.Cross => |t| t.os, + @TagType(Target).Cross => |t| t.os, + }; + } + + pub fn getArch(self: Target) builtin.Arch { + return switch (self) { + Target.Native => builtin.arch, + @TagType(Target).Cross => |t| t.arch, + }; + } + + pub fn getEnviron(self: Target) builtin.Environ { + return switch (self) { + Target.Native => builtin.environ, + @TagType(Target).Cross => |t| t.environ, + }; + } + + pub fn getObjectFormat(self: Target) builtin.ObjectFormat { + return switch (self) { + Target.Native => builtin.object_format, + @TagType(Target).Cross => |t| t.object_format, }; } - pub fn isDarwin(self: *const Target) bool { + pub fn isWasm(self: Target) bool { + return switch (self.getArch()) { + builtin.Arch.wasm32, builtin.Arch.wasm64 => true, + else => false, + }; + } + + pub fn isDarwin(self: Target) bool { return switch (self.getOs()) { builtin.Os.ios, builtin.Os.macosx => true, else => false, }; } - pub fn isWindows(self: 
*const Target) bool { + return switch (self.getOs()) { builtin.Os.windows => true, else => false, }; } -}; -pub fn initializeAll() void { - c.LLVMInitializeAllTargets(); - c.LLVMInitializeAllTargetInfos(); - c.LLVMInitializeAllTargetMCs(); - c.LLVMInitializeAllAsmPrinters(); - c.LLVMInitializeAllAsmParsers(); -} + pub fn initializeAll() void { + llvm.InitializeAllTargets(); + llvm.InitializeAllTargetInfos(); + llvm.InitializeAllTargetMCs(); + llvm.InitializeAllAsmPrinters(); + llvm.InitializeAllAsmParsers(); + } + + pub fn getTriple(self: Target, allocator: *std.mem.Allocator) !std.Buffer { + var result = try std.Buffer.initSize(allocator, 0); + errdefer result.deinit(); + + // LLVM WebAssembly output support requires the target to be activated at + // build time with -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly. + // + // LLVM determines the output format based on the environment suffix, + // defaulting to an object based on the architecture. The default format in + // LLVM 6 sets the wasm arch output incorrectly to ELF. We need to + // explicitly set this ourselves in order for it to work. + // + // This is fixed in LLVM 7 and you will be able to get wasm output by + // using the target triple `wasm32-unknown-unknown-unknown`. 
+ const env_name = if (self.isWasm()) "wasm" else @tagName(self.getEnviron()); + + var out = &std.io.BufferOutStream.init(&result).stream; + try out.print("{}-unknown-{}-{}", @tagName(self.getArch()), @tagName(self.getOs()), env_name); + + return result; + } + + pub fn llvmTargetFromTriple(triple: std.Buffer) !llvm.TargetRef { + var result: llvm.TargetRef = undefined; + var err_msg: [*]u8 = undefined; + if (llvm.GetTargetFromTriple(triple.ptr(), &result, &err_msg) != 0) { + std.debug.warn("triple: {s} error: {s}\n", triple.ptr(), err_msg); + return error.UnsupportedTarget; + } + return result; + } +}; diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig index 3edb267ca9..45e5362124 100644 --- a/src-self-hosted/test.zig +++ b/src-self-hosted/test.zig @@ -46,7 +46,7 @@ pub const TestContext = struct { try self.loop.initMultiThreaded(allocator); errdefer self.loop.deinit(); - self.event_loop_local = EventLoopLocal.init(&self.loop); + self.event_loop_local = try EventLoopLocal.init(&self.loop); errdefer self.event_loop_local.deinit(); self.group = std.event.Group(error!void).init(&self.loop); @@ -107,6 +107,7 @@ pub const TestContext = struct { Target.Native, Compilation.Kind.Obj, builtin.Mode.Debug, + true, // is_static self.zig_lib_dir, self.zig_cache_dir, ); diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index 670547cce2..bb1fb9bb01 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -160,7 +160,7 @@ pub const Type = struct { decls: *Scope.Decls, pub fn destroy(self: *Struct, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Struct, ofile: *ObjectFile) llvm.TypeRef { @@ -180,7 +180,7 @@ pub const Type = struct { }; pub fn create(comp: *Compilation, return_type: *Type, params: []Param, is_var_args: bool) !*Fn { - const result = try comp.a().create(Fn{ + const result = try comp.gpa().create(Fn{ .base = Type{ .base = Value{ .id = Value.Id.Type, @@ -193,7 +193,7 
@@ pub const Type = struct { .params = params, .is_var_args = is_var_args, }); - errdefer comp.a().destroy(result); + errdefer comp.gpa().destroy(result); result.return_type.base.ref(); for (result.params) |param| { @@ -207,7 +207,7 @@ pub const Type = struct { for (self.params) |param| { param.typeof.base.deref(comp); } - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Fn, ofile: *ObjectFile) !llvm.TypeRef { @@ -215,8 +215,8 @@ pub const Type = struct { Type.Id.Void => llvm.VoidTypeInContext(ofile.context) orelse return error.OutOfMemory, else => try self.return_type.getLlvmType(ofile), }; - const llvm_param_types = try ofile.a().alloc(llvm.TypeRef, self.params.len); - defer ofile.a().free(llvm_param_types); + const llvm_param_types = try ofile.gpa().alloc(llvm.TypeRef, self.params.len); + defer ofile.gpa().free(llvm_param_types); for (llvm_param_types) |*llvm_param_type, i| { llvm_param_type.* = try self.params[i].typeof.getLlvmType(ofile); } @@ -241,7 +241,7 @@ pub const Type = struct { } pub fn destroy(self: *MetaType, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -255,7 +255,7 @@ pub const Type = struct { } pub fn destroy(self: *Void, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -269,7 +269,7 @@ pub const Type = struct { } pub fn destroy(self: *Bool, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Bool, ofile: *ObjectFile) llvm.TypeRef { @@ -287,7 +287,7 @@ pub const Type = struct { } pub fn destroy(self: *NoReturn, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -295,7 +295,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Int, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Int, ofile: *ObjectFile) llvm.TypeRef { @@ -307,7 +307,7 @@ pub const Type = struct { base: 
Type, pub fn destroy(self: *Float, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Float, ofile: *ObjectFile) llvm.TypeRef { @@ -332,7 +332,7 @@ pub const Type = struct { pub const Size = builtin.TypeInfo.Pointer.Size; pub fn destroy(self: *Pointer, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn get( @@ -355,7 +355,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Array, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Array, ofile: *ObjectFile) llvm.TypeRef { @@ -367,7 +367,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *ComptimeFloat, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -375,7 +375,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *ComptimeInt, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -383,7 +383,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Undefined, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -391,7 +391,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Null, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -399,7 +399,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Optional, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Optional, ofile: *ObjectFile) llvm.TypeRef { @@ -411,7 +411,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *ErrorUnion, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *ErrorUnion, ofile: *ObjectFile) llvm.TypeRef { @@ -423,7 +423,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *ErrorSet, comp: *Compilation) void { - comp.a().destroy(self); + 
comp.gpa().destroy(self); } pub fn getLlvmType(self: *ErrorSet, ofile: *ObjectFile) llvm.TypeRef { @@ -435,7 +435,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Enum, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Enum, ofile: *ObjectFile) llvm.TypeRef { @@ -447,7 +447,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Union, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Union, ofile: *ObjectFile) llvm.TypeRef { @@ -459,7 +459,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Namespace, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -467,7 +467,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Block, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -475,7 +475,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *BoundFn, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *BoundFn, ofile: *ObjectFile) llvm.TypeRef { @@ -487,7 +487,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *ArgTuple, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -495,7 +495,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Opaque, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Opaque, ofile: *ObjectFile) llvm.TypeRef { @@ -507,7 +507,7 @@ pub const Type = struct { base: Type, pub fn destroy(self: *Promise, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmType(self: *Promise, ofile: *ObjectFile) llvm.TypeRef { diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig index e3b91d2807..be19c6bccf 100644 --- a/src-self-hosted/value.zig +++ b/src-self-hosted/value.zig @@ -4,6 
+4,7 @@ const Scope = @import("scope.zig").Scope; const Compilation = @import("compilation.zig").Compilation; const ObjectFile = @import("codegen.zig").ObjectFile; const llvm = @import("llvm.zig"); +const Buffer = std.Buffer; /// Values are ref-counted, heap-allocated, and copy-on-write /// If there is only 1 ref then write need not copy @@ -68,7 +69,7 @@ pub const Value = struct { /// The main external name that is used in the .o file. /// TODO https://github.com/ziglang/zig/issues/265 - symbol_name: std.Buffer, + symbol_name: Buffer, /// parent should be the top level decls or container decls fndef_scope: *Scope.FnDef, @@ -79,10 +80,22 @@ pub const Value = struct { /// parent is child_scope block_scope: *Scope.Block, + /// Path to the object file that contains this function + containing_object: Buffer, + + link_set_node: *std.LinkedList(?*Value.Fn).Node, + /// Creates a Fn value with 1 ref /// Takes ownership of symbol_name - pub fn create(comp: *Compilation, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: std.Buffer) !*Fn { - const self = try comp.a().create(Fn{ + pub fn create(comp: *Compilation, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: Buffer) !*Fn { + const link_set_node = try comp.gpa().create(Compilation.FnLinkSet.Node{ + .data = null, + .next = undefined, + .prev = undefined, + }); + errdefer comp.gpa().destroy(link_set_node); + + const self = try comp.gpa().create(Fn{ .base = Value{ .id = Value.Id.Fn, .typeof = &fn_type.base, @@ -92,6 +105,8 @@ pub const Value = struct { .child_scope = &fndef_scope.base, .block_scope = undefined, .symbol_name = symbol_name, + .containing_object = Buffer.initNull(comp.gpa()), + .link_set_node = link_set_node, }); fn_type.base.base.ref(); fndef_scope.fn_val = self; @@ -100,9 +115,19 @@ pub const Value = struct { } pub fn destroy(self: *Fn, comp: *Compilation) void { + // remove with a tombstone so that we do not have to grab a lock + if (self.link_set_node.data != null) { + // it's now the job 
of the link step to find this tombstone and + // deallocate it. + self.link_set_node.data = null; + } else { + comp.gpa().destroy(self.link_set_node); + } + + self.containing_object.deinit(); self.fndef_scope.base.deref(comp); self.symbol_name.deinit(); - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -115,7 +140,7 @@ pub const Value = struct { } pub fn destroy(self: *Void, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -134,7 +159,7 @@ pub const Value = struct { } pub fn destroy(self: *Bool, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } pub fn getLlvmConst(self: *Bool, ofile: *ObjectFile) ?llvm.ValueRef { @@ -156,7 +181,7 @@ pub const Value = struct { } pub fn destroy(self: *NoReturn, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; @@ -170,7 +195,7 @@ pub const Value = struct { }; pub fn destroy(self: *Ptr, comp: *Compilation) void { - comp.a().destroy(self); + comp.gpa().destroy(self); } }; }; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 24f2a8a343..a43d2d182c 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -440,6 +440,11 @@ ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unreso return reinterpret_cast(di_builder); } +void ZigLLVMDisposeDIBuilder(ZigLLVMDIBuilder *dbuilder) { + DIBuilder *di_builder = reinterpret_cast(dbuilder); + delete di_builder; +} + void ZigLLVMSetCurrentDebugLocation(LLVMBuilderRef builder, int line, int column, ZigLLVMDIScope *scope) { unwrap(builder)->SetCurrentDebugLocation(DebugLoc::get( line, column, reinterpret_cast(scope))); diff --git a/src/zig_llvm.h b/src/zig_llvm.h index d34300b8ae..6f25df8674 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -39,7 +39,7 @@ struct ZigLLVMInsertionPoint; ZIG_EXTERN_C void ZigLLVMInitializeLoopStrengthReducePass(LLVMPassRegistryRef R); ZIG_EXTERN_C void ZigLLVMInitializeLowerIntrinsicsPass(LLVMPassRegistryRef R); -/// Caller 
must free memory. +/// Caller must free memory with LLVMDisposeMessage ZIG_EXTERN_C char *ZigLLVMGetHostCPUName(void); ZIG_EXTERN_C char *ZigLLVMGetNativeFeatures(void); @@ -139,6 +139,7 @@ ZIG_EXTERN_C unsigned ZigLLVMTag_DW_enumeration_type(void); ZIG_EXTERN_C unsigned ZigLLVMTag_DW_union_type(void); ZIG_EXTERN_C struct ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unresolved); +ZIG_EXTERN_C void ZigLLVMDisposeDIBuilder(struct ZigLLVMDIBuilder *dbuilder); ZIG_EXTERN_C void ZigLLVMAddModuleDebugInfoFlag(LLVMModuleRef module); ZIG_EXTERN_C void ZigLLVMAddModuleCodeViewFlag(LLVMModuleRef module); diff --git a/std/atomic/int.zig b/std/atomic/int.zig index d51454c673..4103d52719 100644 --- a/std/atomic/int.zig +++ b/std/atomic/int.zig @@ -25,5 +25,9 @@ pub fn Int(comptime T: type) type { pub fn get(self: *Self) T { return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst); } + + pub fn xchg(self: *Self, new_value: T) T { + return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Xchg, new_value, AtomicOrder.SeqCst); + } }; } diff --git a/std/buffer.zig b/std/buffer.zig index aff7fa86ef..3b58002aba 100644 --- a/std/buffer.zig +++ b/std/buffer.zig @@ -54,6 +54,19 @@ pub const Buffer = struct { return result; } + pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: ...) 
!Buffer { + const countSize = struct { + fn countSize(size: *usize, bytes: []const u8) (error{}!void) { + size.* += bytes.len; + } + }.countSize; + var size: usize = 0; + std.fmt.format(&size, error{}, countSize, format, args) catch |err| switch (err) {}; + var self = try Buffer.initSize(allocator, size); + assert((std.fmt.bufPrint(self.list.items, format, args) catch unreachable).len == size); + return self; + } + pub fn deinit(self: *Buffer) void { self.list.deinit(); } diff --git a/std/dwarf.zig b/std/dwarf.zig index 76ed122447..2cf8ed953e 100644 --- a/std/dwarf.zig +++ b/std/dwarf.zig @@ -639,3 +639,40 @@ pub const LNE_define_file = 0x03; pub const LNE_set_discriminator = 0x04; pub const LNE_lo_user = 0x80; pub const LNE_hi_user = 0xff; + +pub const LANG_C89 = 0x0001; +pub const LANG_C = 0x0002; +pub const LANG_Ada83 = 0x0003; +pub const LANG_C_plus_plus = 0x0004; +pub const LANG_Cobol74 = 0x0005; +pub const LANG_Cobol85 = 0x0006; +pub const LANG_Fortran77 = 0x0007; +pub const LANG_Fortran90 = 0x0008; +pub const LANG_Pascal83 = 0x0009; +pub const LANG_Modula2 = 0x000a; +pub const LANG_Java = 0x000b; +pub const LANG_C99 = 0x000c; +pub const LANG_Ada95 = 0x000d; +pub const LANG_Fortran95 = 0x000e; +pub const LANG_PLI = 0x000f; +pub const LANG_ObjC = 0x0010; +pub const LANG_ObjC_plus_plus = 0x0011; +pub const LANG_UPC = 0x0012; +pub const LANG_D = 0x0013; +pub const LANG_Python = 0x0014; +pub const LANG_Go = 0x0016; +pub const LANG_C_plus_plus_11 = 0x001a; +pub const LANG_Rust = 0x001c; +pub const LANG_C11 = 0x001d; +pub const LANG_C_plus_plus_14 = 0x0021; +pub const LANG_Fortran03 = 0x0022; +pub const LANG_Fortran08 = 0x0023; +pub const LANG_lo_user = 0x8000; +pub const LANG_hi_user = 0xffff; +pub const LANG_Mips_Assembler = 0x8001; +pub const LANG_Upc = 0x8765; +pub const LANG_HP_Bliss = 0x8003; +pub const LANG_HP_Basic91 = 0x8004; +pub const LANG_HP_Pascal91 = 0x8005; +pub const LANG_HP_IMacro = 0x8006; +pub const LANG_HP_Assembler = 0x8007; diff --git 
a/std/event/future.zig b/std/event/future.zig index 0f27b4131b..f5d14d1ca6 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -6,15 +6,20 @@ const AtomicOrder = builtin.AtomicOrder; const Lock = std.event.Lock; const Loop = std.event.Loop; -/// This is a value that starts out unavailable, until a value is put(). +/// This is a value that starts out unavailable, until resolve() is called /// While it is unavailable, coroutines suspend when they try to get() it, -/// and then are resumed when the value is put(). -/// At this point the value remains forever available, and another put() is not allowed. +/// and then are resumed when resolve() is called. +/// At this point the value remains forever available, and another resolve() is not allowed. pub fn Future(comptime T: type) type { return struct { lock: Lock, data: T, - available: u8, // TODO make this a bool + + /// TODO make this an enum + /// 0 - not started + /// 1 - started + /// 2 - finished + available: u8, const Self = this; const Queue = std.atomic.Queue(promise); @@ -31,7 +36,7 @@ pub fn Future(comptime T: type) type { /// available. /// Thread-safe. pub async fn get(self: *Self) *T { - if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1) { + if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) { return &self.data; } const held = await (async self.lock.acquire() catch unreachable); @@ -43,18 +48,36 @@ pub fn Future(comptime T: type) type { /// Gets the data without waiting for it. If it's available, a pointer is /// returned. Otherwise, null is returned. pub fn getOrNull(self: *Self) ?*T { - if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1) { + if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) { return &self.data; } else { return null; } } + /// If someone else has started working on the data, wait for them to complete + /// and return a pointer to the data. Otherwise, return null, and the caller + /// should start working on the data. 
+ /// It's not required to call start() before resolve() but it can be useful since + /// this method is thread-safe. + pub async fn start(self: *Self) ?*T { + const state = @cmpxchgStrong(u8, &self.available, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null; + switch (state) { + 1 => { + const held = await (async self.lock.acquire() catch unreachable); + held.release(); + return &self.data; + }, + 2 => return &self.data, + else => unreachable, + } + } + /// Make the data become available. May be called only once. /// Before calling this, modify the `data` property. pub fn resolve(self: *Self) void { - const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - assert(prev == 0); // put() called twice + const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst); + assert(prev == 0 or prev == 1); // resolve() called twice Lock.Held.release(Lock.Held{ .lock = &self.lock }); } }; diff --git a/std/index.zig b/std/index.zig index 3b523f519f..2f4cfb7553 100644 --- a/std/index.zig +++ b/std/index.zig @@ -36,6 +36,8 @@ pub const sort = @import("sort.zig"); pub const unicode = @import("unicode.zig"); pub const zig = @import("zig/index.zig"); +pub const lazyInit = @import("lazy_init.zig").lazyInit; + test "std" { // run tests from these _ = @import("atomic/index.zig"); @@ -71,4 +73,5 @@ test "std" { _ = @import("sort.zig"); _ = @import("unicode.zig"); _ = @import("zig/index.zig"); + _ = @import("lazy_init.zig"); } diff --git a/std/lazy_init.zig b/std/lazy_init.zig new file mode 100644 index 0000000000..c46c067810 --- /dev/null +++ b/std/lazy_init.zig @@ -0,0 +1,85 @@ +const std = @import("index.zig"); +const builtin = @import("builtin"); +const assert = std.debug.assert; +const AtomicRmwOp = builtin.AtomicRmwOp; +const AtomicOrder = builtin.AtomicOrder; + +/// Thread-safe initialization of global data. 
+/// TODO use a mutex instead of a spinlock +pub fn lazyInit(comptime T: type) LazyInit(T) { + return LazyInit(T){ + .data = undefined, + .state = 0, + }; +} + +fn LazyInit(comptime T: type) type { + return struct { + state: u8, // TODO make this an enum + data: Data, + + const Self = this; + + // TODO this isn't working for void, investigate and then remove this special case + const Data = if (@sizeOf(T) == 0) u8 else T; + const Ptr = if (T == void) void else *T; + + /// Returns a usable pointer to the initialized data, + /// or returns null, indicating that the caller should + /// perform the initialization and then call resolve(). + pub fn get(self: *Self) ?Ptr { + while (true) { + var state = @cmpxchgWeak(u8, &self.state, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null; + switch (state) { + 0 => continue, + 1 => { + // TODO mutex instead of a spinlock + continue; + }, + 2 => { + if (@sizeOf(T) == 0) { + return T(undefined); + } else { + return &self.data; + } + }, + else => unreachable, + } + } + } + + pub fn resolve(self: *Self) void { + const prev = @atomicRmw(u8, &self.state, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst); + assert(prev == 1); // resolve() called twice + } + }; +} + +var global_number = lazyInit(i32); + +test "std.lazyInit" { + if (global_number.get()) |_| @panic("bad") else { + global_number.data = 1234; + global_number.resolve(); + } + if (global_number.get()) |x| { + assert(x.* == 1234); + } else { + @panic("bad"); + } + if (global_number.get()) |x| { + assert(x.* == 1234); + } else { + @panic("bad"); + } +} + +var global_void = lazyInit(void); + +test "std.lazyInit(void)" { + if (global_void.get()) |_| @panic("bad") else { + global_void.resolve(); + } + assert(global_void.get() != null); + assert(global_void.get() != null); +} -- cgit v1.2.3 From 244a7fdafb97b215e0e9e3e8aaa23777eccebd14 Mon Sep 17 00:00:00 2001 From: kristopher tate Date: Sun, 29 Jul 2018 17:12:52 +0900 Subject: std/event/future.zig: remove promise_symbol from 
suspend and use @handle(); Tracking Issue #1296 ; --- std/event/future.zig | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'std/event/future.zig') diff --git a/std/event/future.zig b/std/event/future.zig index f5d14d1ca6..f9b9db86a7 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -100,8 +100,9 @@ test "std.event.Future" { } async fn testFuture(loop: *Loop) void { - suspend |p| { - resume p; + suspend { + var h: promise = @handle(); + resume h; } var future = Future(i32).init(loop); @@ -115,15 +116,17 @@ async fn testFuture(loop: *Loop) void { } async fn waitOnFuture(future: *Future(i32)) i32 { - suspend |p| { - resume p; + suspend { + var h: promise = @handle(); + resume h; } return (await (async future.get() catch @panic("memory"))).*; } async fn resolveFuture(future: *Future(i32)) void { - suspend |p| { - resume p; + suspend { + var h: promise = @handle(); + resume h; } future.data = 6; future.resolve(); -- cgit v1.2.3 From 96a94e7da933dafec25356c435f5725c3cb0ce04 Mon Sep 17 00:00:00 2001 From: kristopher tate Date: Thu, 2 Aug 2018 17:52:40 +0900 Subject: std/event: directly return @handle(); Tracking Issue #1296 ; --- std/event/future.zig | 9 +++------ std/event/group.zig | 3 +-- std/event/lock.zig | 3 +-- std/event/tcp.zig | 3 +-- 4 files changed, 6 insertions(+), 12 deletions(-) (limited to 'std/event/future.zig') diff --git a/std/event/future.zig b/std/event/future.zig index f9b9db86a7..8abdce7d02 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -101,8 +101,7 @@ test "std.event.Future" { async fn testFuture(loop: *Loop) void { suspend { - var h: promise = @handle(); - resume h; + resume @handle(); } var future = Future(i32).init(loop); @@ -117,16 +116,14 @@ async fn testFuture(loop: *Loop) void { async fn waitOnFuture(future: *Future(i32)) i32 { suspend { - var h: promise = @handle(); - resume h; + resume @handle(); } return (await (async future.get() catch @panic("memory"))).*; } async fn 
resolveFuture(future: *Future(i32)) void { suspend { - var h: promise = @handle(); - resume h; + resume @handle(); } future.data = 6; future.resolve(); diff --git a/std/event/group.zig b/std/event/group.zig index 493913010f..6c7fc63699 100644 --- a/std/event/group.zig +++ b/std/event/group.zig @@ -57,8 +57,7 @@ pub fn Group(comptime ReturnType: type) type { suspend { var my_node: Stack.Node = undefined; node.* = &my_node; - var h: promise = @handle(); - resume h; + resume @handle(); } // TODO this allocation elision should be guaranteed because we await it in diff --git a/std/event/lock.zig b/std/event/lock.zig index 2769a2153c..c4cb1a3f0e 100644 --- a/std/event/lock.zig +++ b/std/event/lock.zig @@ -142,8 +142,7 @@ test "std.event.Lock" { async fn testLock(loop: *Loop, lock: *Lock) void { // TODO explicitly put next tick node memory in the coroutine frame #1194 suspend { - var h: promise = @handle(); - resume h; + resume @handle(); } const handle1 = async lockRunner(lock) catch @panic("out of memory"); var tick_node1 = Loop.NextTickNode{ diff --git a/std/event/tcp.zig b/std/event/tcp.zig index 9a3c6f95ca..ea803a9322 100644 --- a/std/event/tcp.zig +++ b/std/event/tcp.zig @@ -142,8 +142,7 @@ test "listen on a port, send bytes, receive bytes" { std.debug.panic("unable to handle connection: {}\n", err); }; suspend { - var h: promise = @handle(); - cancel h; + cancel @handle(); } } async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: *const std.os.File) !void { -- cgit v1.2.3