aboutsummaryrefslogtreecommitdiff
path: root/src-self-hosted
diff options
context:
space:
mode:
authorAndrea Orru <andrea@orru.io>2018-08-06 01:43:19 -0400
committerAndrea Orru <andrea@orru.io>2018-08-06 01:43:19 -0400
commitd2f5e57b68da0b16e5789ca19045ccbcb4ecfa8d (patch)
treee9fa3caec533a0d1e2b434868b2fde1f9240e5c8 /src-self-hosted
parent06614b3fa09954464c2e2f32756cacedc178a282 (diff)
parent63a23e848a62d5f167f8d5478de9766cb24aa6eb (diff)
downloadzig-d2f5e57b68da0b16e5789ca19045ccbcb4ecfa8d.tar.gz
zig-d2f5e57b68da0b16e5789ca19045ccbcb4ecfa8d.zip
Merge branch 'master' into zen_stdlib
Diffstat (limited to 'src-self-hosted')
-rw-r--r--src-self-hosted/arg.zig96
-rw-r--r--src-self-hosted/c.zig3
-rw-r--r--src-self-hosted/c_int.zig68
-rw-r--r--src-self-hosted/codegen.zig450
-rw-r--r--src-self-hosted/compilation.zig1303
-rw-r--r--src-self-hosted/decl.zig98
-rw-r--r--src-self-hosted/errmsg.zig237
-rw-r--r--src-self-hosted/introspect.zig17
-rw-r--r--src-self-hosted/ir.zig2688
-rw-r--r--src-self-hosted/libc_installation.zig462
-rw-r--r--src-self-hosted/link.zig737
-rw-r--r--src-self-hosted/llvm.zig209
-rw-r--r--src-self-hosted/main.zig1052
-rw-r--r--src-self-hosted/module.zig326
-rw-r--r--src-self-hosted/package.zig29
-rw-r--r--src-self-hosted/scope.zig392
-rw-r--r--src-self-hosted/target.zig556
-rw-r--r--src-self-hosted/test.zig243
-rw-r--r--src-self-hosted/type.zig1101
-rw-r--r--src-self-hosted/value.zig581
-rw-r--r--src-self-hosted/visib.zig4
21 files changed, 9558 insertions, 1094 deletions
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index 707f208287..2ab44e5fdf 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -30,24 +30,22 @@ fn argInAllowedSet(maybe_set: ?[]const []const u8, arg: []const u8) bool {
}
// Modifies the current argument index during iteration
-fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required: usize,
- allowed_set: ?[]const []const u8, index: &usize) !FlagArg {
-
+fn readFlagArguments(allocator: *Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg {
switch (required) {
- 0 => return FlagArg { .None = undefined }, // TODO: Required to force non-tag but value?
+ 0 => return FlagArg{ .None = undefined }, // TODO: Required to force non-tag but value?
1 => {
- if (*index + 1 >= args.len) {
+ if (index.* + 1 >= args.len) {
return error.MissingFlagArguments;
}
- *index += 1;
- const arg = args[*index];
+ index.* += 1;
+ const arg = args[index.*];
if (!argInAllowedSet(allowed_set, arg)) {
return error.ArgumentNotInAllowedSet;
}
- return FlagArg { .Single = arg };
+ return FlagArg{ .Single = arg };
},
else => |needed| {
var extra = ArrayList([]const u8).init(allocator);
@@ -55,12 +53,12 @@ fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required:
var j: usize = 0;
while (j < needed) : (j += 1) {
- if (*index + 1 >= args.len) {
+ if (index.* + 1 >= args.len) {
return error.MissingFlagArguments;
}
- *index += 1;
- const arg = args[*index];
+ index.* += 1;
+ const arg = args[index.*];
if (!argInAllowedSet(allowed_set, arg)) {
return error.ArgumentNotInAllowedSet;
@@ -69,7 +67,7 @@ fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required:
try extra.append(arg);
}
- return FlagArg { .Many = extra };
+ return FlagArg{ .Many = extra };
},
}
}
@@ -81,8 +79,8 @@ pub const Args = struct {
flags: HashMapFlags,
positionals: ArrayList([]const u8),
- pub fn parse(allocator: &Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
- var parsed = Args {
+ pub fn parse(allocator: *Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
+ var parsed = Args{
.flags = HashMapFlags.init(allocator),
.positionals = ArrayList([]const u8).init(allocator),
};
@@ -101,7 +99,7 @@ pub const Args = struct {
error.ArgumentNotInAllowedSet => {
std.debug.warn("argument '{}' is invalid for flag '{}'\n", args[i], arg);
std.debug.warn("allowed options are ");
- for (??flag.allowed_set) |possible| {
+ for (flag.allowed_set.?) |possible| {
std.debug.warn("'{}' ", possible);
}
std.debug.warn("\n");
@@ -116,11 +114,7 @@ pub const Args = struct {
};
if (flag.mergable) {
- var prev =
- if (parsed.flags.get(flag_name_trimmed)) |entry|
- entry.value.Many
- else
- ArrayList([]const u8).init(allocator);
+ var prev = if (parsed.flags.get(flag_name_trimmed)) |entry| entry.value.Many else ArrayList([]const u8).init(allocator);
// MergeN creation disallows 0 length flag entry (doesn't make sense)
switch (flag_args) {
@@ -129,7 +123,7 @@ pub const Args = struct {
FlagArg.Many => |inner| try prev.appendSlice(inner.toSliceConst()),
}
- _ = try parsed.flags.put(flag_name_trimmed, FlagArg { .Many = prev });
+ _ = try parsed.flags.put(flag_name_trimmed, FlagArg{ .Many = prev });
} else {
_ = try parsed.flags.put(flag_name_trimmed, flag_args);
}
@@ -149,21 +143,23 @@ pub const Args = struct {
return parsed;
}
- pub fn deinit(self: &Args) void {
+ pub fn deinit(self: *Args) void {
self.flags.deinit();
self.positionals.deinit();
}
// e.g. --help
- pub fn present(self: &Args, name: []const u8) bool {
+ pub fn present(self: *Args, name: []const u8) bool {
return self.flags.contains(name);
}
// e.g. --name value
- pub fn single(self: &Args, name: []const u8) ?[]const u8 {
+ pub fn single(self: *Args, name: []const u8) ?[]const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
- FlagArg.Single => |inner| { return inner; },
+ FlagArg.Single => |inner| {
+ return inner;
+ },
else => @panic("attempted to retrieve flag with wrong type"),
}
} else {
@@ -172,14 +168,16 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
- pub fn many(self: &Args, name: []const u8) ?[]const []const u8 {
+ pub fn many(self: *Args, name: []const u8) []const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
- FlagArg.Many => |inner| { return inner.toSliceConst(); },
+ FlagArg.Many => |inner| {
+ return inner.toSliceConst();
+ },
else => @panic("attempted to retrieve flag with wrong type"),
}
} else {
- return null;
+ return []const []const u8{};
}
}
};
@@ -207,7 +205,7 @@ pub const Flag = struct {
}
pub fn ArgN(comptime name: []const u8, comptime n: usize) Flag {
- return Flag {
+ return Flag{
.name = name,
.required = n,
.mergable = false,
@@ -220,7 +218,7 @@ pub const Flag = struct {
@compileError("n must be greater than 0");
}
- return Flag {
+ return Flag{
.name = name,
.required = n,
.mergable = true,
@@ -229,7 +227,7 @@ pub const Flag = struct {
}
pub fn Option(comptime name: []const u8, comptime set: []const []const u8) Flag {
- return Flag {
+ return Flag{
.name = name,
.required = 1,
.mergable = false,
@@ -239,26 +237,36 @@ pub const Flag = struct {
};
test "parse arguments" {
- const spec1 = comptime []const Flag {
+ const spec1 = comptime []const Flag{
Flag.Bool("--help"),
Flag.Bool("--init"),
Flag.Arg1("--build-file"),
- Flag.Option("--color", []const []const u8 { "on", "off", "auto" }),
+ Flag.Option("--color", []const []const u8{
+ "on",
+ "off",
+ "auto",
+ }),
Flag.ArgN("--pkg-begin", 2),
Flag.ArgMergeN("--object", 1),
Flag.ArgN("--library", 1),
};
- const cliargs = []const []const u8 {
+ const cliargs = []const []const u8{
"build",
"--help",
"pos1",
- "--build-file", "build.zig",
- "--object", "obj1",
- "--object", "obj2",
- "--library", "lib1",
- "--library", "lib2",
- "--color", "on",
+ "--build-file",
+ "build.zig",
+ "--object",
+ "obj1",
+ "--object",
+ "obj2",
+ "--library",
+ "lib1",
+ "--library",
+ "lib2",
+ "--color",
+ "on",
"pos2",
};
@@ -268,14 +276,14 @@ test "parse arguments" {
debug.assert(!args.present("help2"));
debug.assert(!args.present("init"));
- debug.assert(mem.eql(u8, ??args.single("build-file"), "build.zig"));
- debug.assert(mem.eql(u8, ??args.single("color"), "on"));
+ debug.assert(mem.eql(u8, args.single("build-file").?, "build.zig"));
+ debug.assert(mem.eql(u8, args.single("color").?, "on"));
- const objects = ??args.many("object");
+ const objects = args.many("object").?;
debug.assert(mem.eql(u8, objects[0], "obj1"));
debug.assert(mem.eql(u8, objects[1], "obj2"));
- debug.assert(mem.eql(u8, ??args.single("library"), "lib2"));
+ debug.assert(mem.eql(u8, args.single("library").?, "lib2"));
const pos = args.positionals.toSliceConst();
debug.assert(mem.eql(u8, pos[0], "build"));
diff --git a/src-self-hosted/c.zig b/src-self-hosted/c.zig
index 08060fbe3a..778d851240 100644
--- a/src-self-hosted/c.zig
+++ b/src-self-hosted/c.zig
@@ -1,5 +1,8 @@
pub use @cImport({
+ @cDefine("__STDC_CONSTANT_MACROS", "");
+ @cDefine("__STDC_LIMIT_MACROS", "");
@cInclude("inttypes.h");
@cInclude("config.h");
@cInclude("zig_llvm.h");
+ @cInclude("windows_sdk.h");
});
diff --git a/src-self-hosted/c_int.zig b/src-self-hosted/c_int.zig
new file mode 100644
index 0000000000..10ce54da05
--- /dev/null
+++ b/src-self-hosted/c_int.zig
@@ -0,0 +1,68 @@
+pub const CInt = struct {
+ id: Id,
+ zig_name: []const u8,
+ c_name: []const u8,
+ is_signed: bool,
+
+ pub const Id = enum {
+ Short,
+ UShort,
+ Int,
+ UInt,
+ Long,
+ ULong,
+ LongLong,
+ ULongLong,
+ };
+
+ pub const list = []CInt{
+ CInt{
+ .id = Id.Short,
+ .zig_name = "c_short",
+ .c_name = "short",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.UShort,
+ .zig_name = "c_ushort",
+ .c_name = "unsigned short",
+ .is_signed = false,
+ },
+ CInt{
+ .id = Id.Int,
+ .zig_name = "c_int",
+ .c_name = "int",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.UInt,
+ .zig_name = "c_uint",
+ .c_name = "unsigned int",
+ .is_signed = false,
+ },
+ CInt{
+ .id = Id.Long,
+ .zig_name = "c_long",
+ .c_name = "long",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.ULong,
+ .zig_name = "c_ulong",
+ .c_name = "unsigned long",
+ .is_signed = false,
+ },
+ CInt{
+ .id = Id.LongLong,
+ .zig_name = "c_longlong",
+ .c_name = "long long",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.ULongLong,
+ .zig_name = "c_ulonglong",
+ .c_name = "unsigned long long",
+ .is_signed = false,
+ },
+ };
+};
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
new file mode 100644
index 0000000000..5ca01ca7e7
--- /dev/null
+++ b/src-self-hosted/codegen.zig
@@ -0,0 +1,450 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Compilation = @import("compilation.zig").Compilation;
+const llvm = @import("llvm.zig");
+const c = @import("c.zig");
+const ir = @import("ir.zig");
+const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
+const Scope = @import("scope.zig").Scope;
+const event = std.event;
+const assert = std.debug.assert;
+const DW = std.dwarf;
+
+pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) !void {
+ fn_val.base.ref();
+ defer fn_val.base.deref(comp);
+ defer code.destroy(comp.gpa());
+
+ var output_path = try await (async comp.createRandomOutputPath(comp.target.objFileExt()) catch unreachable);
+ errdefer output_path.deinit();
+
+ const llvm_handle = try comp.event_loop_local.getAnyLlvmContext();
+ defer llvm_handle.release(comp.event_loop_local);
+
+ const context = llvm_handle.node.data;
+
+ const module = llvm.ModuleCreateWithNameInContext(comp.name.ptr(), context) orelse return error.OutOfMemory;
+ defer llvm.DisposeModule(module);
+
+ llvm.SetTarget(module, comp.llvm_triple.ptr());
+ llvm.SetDataLayout(module, comp.target_layout_str);
+
+ if (comp.target.getObjectFormat() == builtin.ObjectFormat.coff) {
+ llvm.AddModuleCodeViewFlag(module);
+ } else {
+ llvm.AddModuleDebugInfoFlag(module);
+ }
+
+ const builder = llvm.CreateBuilderInContext(context) orelse return error.OutOfMemory;
+ defer llvm.DisposeBuilder(builder);
+
+ const dibuilder = llvm.CreateDIBuilder(module, true) orelse return error.OutOfMemory;
+ defer llvm.DisposeDIBuilder(dibuilder);
+
+ // Don't use ZIG_VERSION_STRING here. LLVM misparses it when it includes
+ // the git revision.
+ const producer = try std.Buffer.allocPrint(
+ &code.arena.allocator,
+ "zig {}.{}.{}",
+ u32(c.ZIG_VERSION_MAJOR),
+ u32(c.ZIG_VERSION_MINOR),
+ u32(c.ZIG_VERSION_PATCH),
+ );
+ const flags = c"";
+ const runtime_version = 0;
+ const compile_unit_file = llvm.CreateFile(
+ dibuilder,
+ comp.name.ptr(),
+ comp.root_package.root_src_dir.ptr(),
+ ) orelse return error.OutOfMemory;
+ const is_optimized = comp.build_mode != builtin.Mode.Debug;
+ const compile_unit = llvm.CreateCompileUnit(
+ dibuilder,
+ DW.LANG_C99,
+ compile_unit_file,
+ producer.ptr(),
+ is_optimized,
+ flags,
+ runtime_version,
+ c"",
+ 0,
+ !comp.strip,
+ ) orelse return error.OutOfMemory;
+
+ var ofile = ObjectFile{
+ .comp = comp,
+ .module = module,
+ .builder = builder,
+ .dibuilder = dibuilder,
+ .context = context,
+ .lock = event.Lock.init(comp.loop),
+ .arena = &code.arena.allocator,
+ };
+
+ try renderToLlvmModule(&ofile, fn_val, code);
+
+ // TODO module level assembly
+ //if (buf_len(&g->global_asm) != 0) {
+ // LLVMSetModuleInlineAsm(g->module, buf_ptr(&g->global_asm));
+ //}
+
+ llvm.DIBuilderFinalize(dibuilder);
+
+ if (comp.verbose_llvm_ir) {
+ std.debug.warn("raw module:\n");
+ llvm.DumpModule(ofile.module);
+ }
+
+ // verify the llvm module when safety is on
+ if (std.debug.runtime_safety) {
+ var error_ptr: ?[*]u8 = null;
+ _ = llvm.VerifyModule(ofile.module, llvm.AbortProcessAction, &error_ptr);
+ }
+
+ assert(comp.emit_file_type == Compilation.Emit.Binary); // TODO support other types
+
+ const is_small = comp.build_mode == builtin.Mode.ReleaseSmall;
+ const is_debug = comp.build_mode == builtin.Mode.Debug;
+
+ var err_msg: [*]u8 = undefined;
+ // TODO integrate this with evented I/O
+ if (llvm.TargetMachineEmitToFile(
+ comp.target_machine,
+ module,
+ output_path.ptr(),
+ llvm.EmitBinary,
+ &err_msg,
+ is_debug,
+ is_small,
+ )) {
+ if (std.debug.runtime_safety) {
+ std.debug.panic("unable to write object file {}: {s}\n", output_path.toSliceConst(), err_msg);
+ }
+ return error.WritingObjectFileFailed;
+ }
+ //validate_inline_fns(g); TODO
+ fn_val.containing_object = output_path;
+ if (comp.verbose_llvm_ir) {
+ std.debug.warn("optimized module:\n");
+ llvm.DumpModule(ofile.module);
+ }
+ if (comp.verbose_link) {
+ std.debug.warn("created {}\n", output_path.toSliceConst());
+ }
+}
+
+pub const ObjectFile = struct {
+ comp: *Compilation,
+ module: llvm.ModuleRef,
+ builder: llvm.BuilderRef,
+ dibuilder: *llvm.DIBuilder,
+ context: llvm.ContextRef,
+ lock: event.Lock,
+ arena: *std.mem.Allocator,
+
+ fn gpa(self: *ObjectFile) *std.mem.Allocator {
+ return self.comp.gpa();
+ }
+};
+
+pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) !void {
+ // TODO audit more of codegen.cpp:fn_llvm_value and port more logic
+ const llvm_fn_type = try fn_val.base.typ.getLlvmType(ofile.arena, ofile.context);
+ const llvm_fn = llvm.AddFunction(
+ ofile.module,
+ fn_val.symbol_name.ptr(),
+ llvm_fn_type,
+ ) orelse return error.OutOfMemory;
+
+ const want_fn_safety = fn_val.block_scope.?.safety.get(ofile.comp);
+ if (want_fn_safety and ofile.comp.haveLibC()) {
+ try addLLVMFnAttr(ofile, llvm_fn, "sspstrong");
+ try addLLVMFnAttrStr(ofile, llvm_fn, "stack-protector-buffer-size", "4");
+ }
+
+ // TODO
+ //if (fn_val.align_stack) |align_stack| {
+ // try addLLVMFnAttrInt(ofile, llvm_fn, "alignstack", align_stack);
+ //}
+
+ const fn_type = fn_val.base.typ.cast(Type.Fn).?;
+ const fn_type_normal = &fn_type.key.data.Normal;
+
+ try addLLVMFnAttr(ofile, llvm_fn, "nounwind");
+ //add_uwtable_attr(g, fn_table_entry->llvm_value);
+ try addLLVMFnAttr(ofile, llvm_fn, "nobuiltin");
+
+ //if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) {
+ // ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true");
+ // ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr);
+ //}
+
+ //if (fn_table_entry->section_name) {
+ // LLVMSetSection(fn_table_entry->llvm_value, buf_ptr(fn_table_entry->section_name));
+ //}
+ //if (fn_table_entry->align_bytes > 0) {
+ // LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes);
+ //} else {
+ // // We'd like to set the best alignment for the function here, but on Darwin LLVM gives
+ // // "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling
+ // // any of the functions for getting alignment. Not specifying the alignment should
+ // // use the ABI alignment, which is fine.
+ //}
+
+ //if (!type_has_bits(return_type)) {
+ // // nothing to do
+ //} else if (type_is_codegen_pointer(return_type)) {
+ // addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull");
+ //} else if (handle_is_ptr(return_type) &&
+ // calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc))
+ //{
+ // addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret");
+ // addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull");
+ //}
+
+ // TODO set parameter attributes
+
+ // TODO
+ //uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
+ //if (err_ret_trace_arg_index != UINT32_MAX) {
+ // addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull");
+ //}
+
+ const cur_ret_ptr = if (fn_type_normal.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null;
+
+ // build all basic blocks
+ for (code.basic_block_list.toSlice()) |bb| {
+ bb.llvm_block = llvm.AppendBasicBlockInContext(
+ ofile.context,
+ llvm_fn,
+ bb.name_hint,
+ ) orelse return error.OutOfMemory;
+ }
+ const entry_bb = code.basic_block_list.at(0);
+ llvm.PositionBuilderAtEnd(ofile.builder, entry_bb.llvm_block);
+
+ llvm.ClearCurrentDebugLocation(ofile.builder);
+
+ // TODO set up error return tracing
+ // TODO allocate temporary stack values
+
+ const var_list = fn_type.non_key.Normal.variable_list.toSliceConst();
+ // create debug variable declarations for variables and allocate all local variables
+ for (var_list) |var_scope, i| {
+ const var_type = switch (var_scope.data) {
+ Scope.Var.Data.Const => unreachable,
+ Scope.Var.Data.Param => |param| param.typ,
+ };
+ // if (!type_has_bits(var->value->type)) {
+ // continue;
+ // }
+ // if (ir_get_var_is_comptime(var))
+ // continue;
+ // if (type_requires_comptime(var->value->type))
+ // continue;
+ // if (var->src_arg_index == SIZE_MAX) {
+ // var->value_ref = build_alloca(g, var->value->type, buf_ptr(&var->name), var->align_bytes);
+
+ // var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ // buf_ptr(&var->name), import->di_file, (unsigned)(var->decl_node->line + 1),
+ // var->value->type->di_type, !g->strip_debug_symbols, 0);
+
+ // } else {
+ // it's a parameter
+ // assert(var->gen_arg_index != SIZE_MAX);
+ // TypeTableEntry *gen_type;
+ // FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index];
+
+ if (var_type.handleIsPtr()) {
+ // if (gen_info->is_byval) {
+ // gen_type = var->value->type;
+ // } else {
+ // gen_type = gen_info->type;
+ // }
+ var_scope.data.Param.llvm_value = llvm.GetParam(llvm_fn, @intCast(c_uint, i));
+ } else {
+ // gen_type = var->value->type;
+ var_scope.data.Param.llvm_value = try renderAlloca(ofile, var_type, var_scope.name, Type.Pointer.Align.Abi);
+ }
+ // if (var->decl_node) {
+ // var->di_loc_var = ZigLLVMCreateParameterVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ // buf_ptr(&var->name), import->di_file,
+ // (unsigned)(var->decl_node->line + 1),
+ // gen_type->di_type, !g->strip_debug_symbols, 0, (unsigned)(var->gen_arg_index + 1));
+ // }
+
+ // }
+ }
+
+ // TODO finishing error return trace setup. we have to do this after all the allocas.
+
+ // create debug variable declarations for parameters
+ // rely on the first variables in the variable_list being parameters.
+ //size_t next_var_i = 0;
+ for (fn_type.key.data.Normal.params) |param, i| {
+ //FnGenParamInfo *info = &fn_table_entry->type_entry->data.fn.gen_param_info[param_i];
+ //if (info->gen_index == SIZE_MAX)
+ // continue;
+ const scope_var = var_list[i];
+ //assert(variable->src_arg_index != SIZE_MAX);
+ //next_var_i += 1;
+ //assert(variable);
+ //assert(variable->value_ref);
+
+ if (!param.typ.handleIsPtr()) {
+ //clear_debug_source_node(g);
+ const llvm_param = llvm.GetParam(llvm_fn, @intCast(c_uint, i));
+ _ = renderStoreUntyped(
+ ofile,
+ llvm_param,
+ scope_var.data.Param.llvm_value,
+ Type.Pointer.Align.Abi,
+ Type.Pointer.Vol.Non,
+ );
+ }
+
+ //if (variable->decl_node) {
+ // gen_var_debug_decl(g, variable);
+ //}
+ }
+
+ for (code.basic_block_list.toSlice()) |current_block| {
+ llvm.PositionBuilderAtEnd(ofile.builder, current_block.llvm_block);
+ for (current_block.instruction_list.toSlice()) |instruction| {
+ if (instruction.ref_count == 0 and !instruction.hasSideEffects()) continue;
+
+ instruction.llvm_value = try instruction.render(ofile, fn_val);
+ }
+ current_block.llvm_exit_block = llvm.GetInsertBlock(ofile.builder);
+ }
+}
+
+fn addLLVMAttr(
+ ofile: *ObjectFile,
+ val: llvm.ValueRef,
+ attr_index: llvm.AttributeIndex,
+ attr_name: []const u8,
+) !void {
+ const kind_id = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len);
+ assert(kind_id != 0);
+ const llvm_attr = llvm.CreateEnumAttribute(ofile.context, kind_id, 0) orelse return error.OutOfMemory;
+ llvm.AddAttributeAtIndex(val, attr_index, llvm_attr);
+}
+
+fn addLLVMAttrStr(
+ ofile: *ObjectFile,
+ val: llvm.ValueRef,
+ attr_index: llvm.AttributeIndex,
+ attr_name: []const u8,
+ attr_val: []const u8,
+) !void {
+ const llvm_attr = llvm.CreateStringAttribute(
+ ofile.context,
+ attr_name.ptr,
+ @intCast(c_uint, attr_name.len),
+ attr_val.ptr,
+ @intCast(c_uint, attr_val.len),
+ ) orelse return error.OutOfMemory;
+ llvm.AddAttributeAtIndex(val, attr_index, llvm_attr);
+}
+
+fn addLLVMAttrInt(
+ val: llvm.ValueRef,
+ attr_index: llvm.AttributeIndex,
+ attr_name: []const u8,
+ attr_val: u64,
+) !void {
+ const kind_id = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len);
+ assert(kind_id != 0);
+ const llvm_attr = llvm.CreateEnumAttribute(ofile.context, kind_id, attr_val) orelse return error.OutOfMemory;
+ llvm.AddAttributeAtIndex(val, attr_index, llvm_attr);
+}
+
+fn addLLVMFnAttr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8) !void {
+ return addLLVMAttr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name);
+}
+
+fn addLLVMFnAttrStr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: []const u8) !void {
+ return addLLVMAttrStr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val);
+}
+
+fn addLLVMFnAttrInt(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: u64) !void {
+ return addLLVMAttrInt(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val);
+}
+
+fn renderLoadUntyped(
+ ofile: *ObjectFile,
+ ptr: llvm.ValueRef,
+ alignment: Type.Pointer.Align,
+ vol: Type.Pointer.Vol,
+ name: [*]const u8,
+) !llvm.ValueRef {
+ const result = llvm.BuildLoad(ofile.builder, ptr, name) orelse return error.OutOfMemory;
+ switch (vol) {
+ Type.Pointer.Vol.Non => {},
+ Type.Pointer.Vol.Volatile => llvm.SetVolatile(result, 1),
+ }
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm.GetElementType(llvm.TypeOf(ptr))));
+ return result;
+}
+
+fn renderLoad(ofile: *ObjectFile, ptr: llvm.ValueRef, ptr_type: *Type.Pointer, name: [*]const u8) !llvm.ValueRef {
+ return renderLoadUntyped(ofile, ptr, ptr_type.key.alignment, ptr_type.key.vol, name);
+}
+
+pub fn getHandleValue(ofile: *ObjectFile, ptr: llvm.ValueRef, ptr_type: *Type.Pointer) !?llvm.ValueRef {
+ const child_type = ptr_type.key.child_type;
+ if (!child_type.hasBits()) {
+ return null;
+ }
+ if (child_type.handleIsPtr()) {
+ return ptr;
+ }
+ return try renderLoad(ofile, ptr, ptr_type, c"");
+}
+
+pub fn renderStoreUntyped(
+ ofile: *ObjectFile,
+ value: llvm.ValueRef,
+ ptr: llvm.ValueRef,
+ alignment: Type.Pointer.Align,
+ vol: Type.Pointer.Vol,
+) !llvm.ValueRef {
+ const result = llvm.BuildStore(ofile.builder, value, ptr) orelse return error.OutOfMemory;
+ switch (vol) {
+ Type.Pointer.Vol.Non => {},
+ Type.Pointer.Vol.Volatile => llvm.SetVolatile(result, 1),
+ }
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm.TypeOf(value)));
+ return result;
+}
+
+pub fn renderStore(
+ ofile: *ObjectFile,
+ value: llvm.ValueRef,
+ ptr: llvm.ValueRef,
+ ptr_type: *Type.Pointer,
+) !llvm.ValueRef {
+ return renderStoreUntyped(ofile, value, ptr, ptr_type.key.alignment, ptr_type.key.vol);
+}
+
+pub fn renderAlloca(
+ ofile: *ObjectFile,
+ var_type: *Type,
+ name: []const u8,
+ alignment: Type.Pointer.Align,
+) !llvm.ValueRef {
+ const llvm_var_type = try var_type.getLlvmType(ofile.arena, ofile.context);
+ const name_with_null = try std.cstr.addNullByte(ofile.arena, name);
+ const result = llvm.BuildAlloca(ofile.builder, llvm_var_type, name_with_null.ptr) orelse return error.OutOfMemory;
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm_var_type));
+ return result;
+}
+
+pub fn resolveAlign(ofile: *ObjectFile, alignment: Type.Pointer.Align, llvm_type: llvm.TypeRef) u32 {
+ return switch (alignment) {
+ Type.Pointer.Align.Abi => return llvm.ABIAlignmentOfType(ofile.comp.target_data_ref, llvm_type),
+ Type.Pointer.Align.Override => |a| a,
+ };
+}
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
new file mode 100644
index 0000000000..5ff8b1a858
--- /dev/null
+++ b/src-self-hosted/compilation.zig
@@ -0,0 +1,1303 @@
+const std = @import("std");
+const os = std.os;
+const io = std.io;
+const mem = std.mem;
+const Allocator = mem.Allocator;
+const Buffer = std.Buffer;
+const llvm = @import("llvm.zig");
+const c = @import("c.zig");
+const builtin = @import("builtin");
+const Target = @import("target.zig").Target;
+const warn = std.debug.warn;
+const Token = std.zig.Token;
+const ArrayList = std.ArrayList;
+const errmsg = @import("errmsg.zig");
+const ast = std.zig.ast;
+const event = std.event;
+const assert = std.debug.assert;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Scope = @import("scope.zig").Scope;
+const Decl = @import("decl.zig").Decl;
+const ir = @import("ir.zig");
+const Visib = @import("visib.zig").Visib;
+const Value = @import("value.zig").Value;
+const Type = Value.Type;
+const Span = errmsg.Span;
+const Msg = errmsg.Msg;
+const codegen = @import("codegen.zig");
+const Package = @import("package.zig").Package;
+const link = @import("link.zig").link;
+const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
+const CInt = @import("c_int.zig").CInt;
+
+/// Data that is local to the event loop.
+pub const EventLoopLocal = struct {
+ loop: *event.Loop,
+ llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
+ lld_lock: event.Lock,
+
+ /// TODO pool these so that it doesn't have to lock
+ prng: event.Locked(std.rand.DefaultPrng),
+
+ native_libc: event.Future(LibCInstallation),
+
+ var lazy_init_targets = std.lazyInit(void);
+
+ fn init(loop: *event.Loop) !EventLoopLocal {
+ lazy_init_targets.get() orelse {
+ Target.initializeAll();
+ lazy_init_targets.resolve();
+ };
+
+ var seed_bytes: [@sizeOf(u64)]u8 = undefined;
+ try std.os.getRandomBytes(seed_bytes[0..]);
+ const seed = std.mem.readInt(seed_bytes, u64, builtin.Endian.Big);
+
+ return EventLoopLocal{
+ .loop = loop,
+ .lld_lock = event.Lock.init(loop),
+ .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
+ .prng = event.Locked(std.rand.DefaultPrng).init(loop, std.rand.DefaultPrng.init(seed)),
+ .native_libc = event.Future(LibCInstallation).init(loop),
+ };
+ }
+
+ /// Must be called only after EventLoop.run completes.
+ fn deinit(self: *EventLoopLocal) void {
+ self.lld_lock.deinit();
+ while (self.llvm_handle_pool.pop()) |node| {
+ c.LLVMContextDispose(node.data);
+ self.loop.allocator.destroy(node);
+ }
+ }
+
+ /// Gets an exclusive handle on any LlvmContext.
+ /// Caller must release the handle when done.
+ pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
+ if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
+
+ const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
+ errdefer c.LLVMContextDispose(context_ref);
+
+ const node = try self.loop.allocator.create(std.atomic.Stack(llvm.ContextRef).Node{
+ .next = undefined,
+ .data = context_ref,
+ });
+ errdefer self.loop.allocator.destroy(node);
+
+ return LlvmHandle{ .node = node };
+ }
+
+ pub async fn getNativeLibC(self: *EventLoopLocal) !*LibCInstallation {
+ if (await (async self.native_libc.start() catch unreachable)) |ptr| return ptr;
+ try await (async self.native_libc.data.findNative(self.loop) catch unreachable);
+ self.native_libc.resolve();
+ return &self.native_libc.data;
+ }
+};
+
+pub const LlvmHandle = struct {
+ node: *std.atomic.Stack(llvm.ContextRef).Node,
+
+ pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
+ event_loop_local.llvm_handle_pool.push(self.node);
+ }
+};
+
+pub const Compilation = struct {
+ event_loop_local: *EventLoopLocal,
+ loop: *event.Loop,
+ name: Buffer,
+ llvm_triple: Buffer,
+ root_src_path: ?[]const u8,
+ target: Target,
+ llvm_target: llvm.TargetRef,
+ build_mode: builtin.Mode,
+ zig_lib_dir: []const u8,
+ zig_std_dir: []const u8,
+
+ /// lazily created when we need it
+ tmp_dir: event.Future(BuildError![]u8),
+
+ version_major: u32,
+ version_minor: u32,
+ version_patch: u32,
+
+ linker_script: ?[]const u8,
+ out_h_path: ?[]const u8,
+
+ is_test: bool,
+ each_lib_rpath: bool,
+ strip: bool,
+ is_static: bool,
+ linker_rdynamic: bool,
+
+ clang_argv: []const []const u8,
+ llvm_argv: []const []const u8,
+ lib_dirs: []const []const u8,
+ rpath_list: []const []const u8,
+ assembly_files: []const []const u8,
+
+ /// paths that are explicitly provided by the user to link against
+ link_objects: []const []const u8,
+
+ /// functions that have their own objects that we need to link
+ /// it uses an optional pointer so that tombstone removals are possible
+ fn_link_set: event.Locked(FnLinkSet),
+
+ pub const FnLinkSet = std.LinkedList(?*Value.Fn);
+
+ windows_subsystem_windows: bool,
+ windows_subsystem_console: bool,
+
+ link_libs_list: ArrayList(*LinkLib),
+ libc_link_lib: ?*LinkLib,
+
+ err_color: errmsg.Color,
+
+ verbose_tokenize: bool,
+ verbose_ast_tree: bool,
+ verbose_ast_fmt: bool,
+ verbose_cimport: bool,
+ verbose_ir: bool,
+ verbose_llvm_ir: bool,
+ verbose_link: bool,
+
+ darwin_frameworks: []const []const u8,
+ darwin_version_min: DarwinVersionMin,
+
+ test_filters: []const []const u8,
+ test_name_prefix: ?[]const u8,
+
+ emit_file_type: Emit,
+
+ kind: Kind,
+
+ link_out_file: ?[]const u8,
+ events: *event.Channel(Event),
+
+ exported_symbol_names: event.Locked(Decl.Table),
+
+ /// Before code generation starts, must wait on this group to make sure
+ /// the build is complete.
+ prelink_group: event.Group(BuildError!void),
+
+ compile_errors: event.Locked(CompileErrList),
+
+ meta_type: *Type.MetaType,
+ void_type: *Type.Void,
+ bool_type: *Type.Bool,
+ noreturn_type: *Type.NoReturn,
+ comptime_int_type: *Type.ComptimeInt,
+ u8_type: *Type.Int,
+
+ void_value: *Value.Void,
+ true_value: *Value.Bool,
+ false_value: *Value.Bool,
+ noreturn_value: *Value.NoReturn,
+
+ target_machine: llvm.TargetMachineRef,
+ target_data_ref: llvm.TargetDataRef,
+ target_layout_str: [*]u8,
+ target_ptr_bits: u32,
+
+ /// for allocating things which have the same lifetime as this Compilation
+ arena_allocator: std.heap.ArenaAllocator,
+
+ root_package: *Package,
+ std_package: *Package,
+
+ override_libc: ?*LibCInstallation,
+
+ /// need to wait on this group before deinitializing
+ deinit_group: event.Group(void),
+
+ destroy_handle: promise,
+
+ have_err_ret_tracing: bool,
+
+ /// not locked because it is read-only
+ primitive_type_table: TypeTable,
+
+ int_type_table: event.Locked(IntTypeTable),
+ array_type_table: event.Locked(ArrayTypeTable),
+ ptr_type_table: event.Locked(PtrTypeTable),
+ fn_type_table: event.Locked(FnTypeTable),
+
+ c_int_types: [CInt.list.len]*Type.Int,
+
+ const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql);
+ const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql);
+ const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql);
+ const FnTypeTable = std.HashMap(*const Type.Fn.Key, *Type.Fn, Type.Fn.Key.hash, Type.Fn.Key.eql);
+ const TypeTable = std.HashMap([]const u8, *Type, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ const CompileErrList = std.ArrayList(*Msg);
+
+ // TODO handle some of these earlier and report them in a way other than error codes
+ pub const BuildError = error{
+ OutOfMemory,
+ EndOfStream,
+ BadFd,
+ Io,
+ IsDir,
+ Unexpected,
+ SystemResources,
+ SharingViolation,
+ PathAlreadyExists,
+ FileNotFound,
+ AccessDenied,
+ PipeBusy,
+ FileTooBig,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ PathNotFound,
+ NoSpaceLeft,
+ NotDir,
+ FileSystem,
+ OperationAborted,
+ IoPending,
+ BrokenPipe,
+ WouldBlock,
+ FileClosed,
+ DestinationAddressRequired,
+ DiskQuota,
+ InputOutput,
+ NoStdHandles,
+ Overflow,
+ NotSupported,
+ BufferTooSmall,
+ Unimplemented, // TODO remove this one
+ SemanticAnalysisFailed, // TODO remove this one
+ ReadOnlyFileSystem,
+ LinkQuotaExceeded,
+ EnvironmentVariableNotFound,
+ AppDataDirUnavailable,
+ LinkFailed,
+ LibCRequiredButNotProvidedOrFound,
+ LibCMissingDynamicLinker,
+ InvalidDarwinVersionString,
+ UnsupportedLinkArchitecture,
+ };
+
+ pub const Event = union(enum) {
+ Ok,
+ Error: BuildError,
+ Fail: []*Msg,
+ };
+
+ pub const DarwinVersionMin = union(enum) {
+ None,
+ MacOS: []const u8,
+ Ios: []const u8,
+ };
+
+ pub const Kind = enum {
+ Exe,
+ Lib,
+ Obj,
+ };
+
+ pub const LinkLib = struct {
+ name: []const u8,
+ path: ?[]const u8,
+
+ /// the list of symbols we depend on from this lib
+ symbols: ArrayList([]u8),
+ provided_explicitly: bool,
+ };
+
+ pub const Emit = enum {
+ Binary,
+ Assembly,
+ LlvmIr,
+ };
+
+ pub fn create(
+ event_loop_local: *EventLoopLocal,
+ name: []const u8,
+ root_src_path: ?[]const u8,
+ target: Target,
+ kind: Kind,
+ build_mode: builtin.Mode,
+ is_static: bool,
+ zig_lib_dir: []const u8,
+ ) !*Compilation {
+ const loop = event_loop_local.loop;
+ const comp = try event_loop_local.loop.allocator.create(Compilation{
+ .loop = loop,
+ .arena_allocator = std.heap.ArenaAllocator.init(loop.allocator),
+ .event_loop_local = event_loop_local,
+ .events = undefined,
+ .root_src_path = root_src_path,
+ .target = target,
+ .llvm_target = undefined,
+ .kind = kind,
+ .build_mode = build_mode,
+ .zig_lib_dir = zig_lib_dir,
+ .zig_std_dir = undefined,
+ .tmp_dir = event.Future(BuildError![]u8).init(loop),
+
+ .name = undefined,
+ .llvm_triple = undefined,
+
+ .version_major = 0,
+ .version_minor = 0,
+ .version_patch = 0,
+
+ .verbose_tokenize = false,
+ .verbose_ast_tree = false,
+ .verbose_ast_fmt = false,
+ .verbose_cimport = false,
+ .verbose_ir = false,
+ .verbose_llvm_ir = false,
+ .verbose_link = false,
+
+ .linker_script = null,
+ .out_h_path = null,
+ .is_test = false,
+ .each_lib_rpath = false,
+ .strip = false,
+ .is_static = is_static,
+ .linker_rdynamic = false,
+ .clang_argv = [][]const u8{},
+ .llvm_argv = [][]const u8{},
+ .lib_dirs = [][]const u8{},
+ .rpath_list = [][]const u8{},
+ .assembly_files = [][]const u8{},
+ .link_objects = [][]const u8{},
+ .fn_link_set = event.Locked(FnLinkSet).init(loop, FnLinkSet.init()),
+ .windows_subsystem_windows = false,
+ .windows_subsystem_console = false,
+ .link_libs_list = undefined,
+ .libc_link_lib = null,
+ .err_color = errmsg.Color.Auto,
+ .darwin_frameworks = [][]const u8{},
+ .darwin_version_min = DarwinVersionMin.None,
+ .test_filters = [][]const u8{},
+ .test_name_prefix = null,
+ .emit_file_type = Emit.Binary,
+ .link_out_file = null,
+ .exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
+ .prelink_group = event.Group(BuildError!void).init(loop),
+ .deinit_group = event.Group(void).init(loop),
+ .compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)),
+ .int_type_table = event.Locked(IntTypeTable).init(loop, IntTypeTable.init(loop.allocator)),
+ .array_type_table = event.Locked(ArrayTypeTable).init(loop, ArrayTypeTable.init(loop.allocator)),
+ .ptr_type_table = event.Locked(PtrTypeTable).init(loop, PtrTypeTable.init(loop.allocator)),
+ .fn_type_table = event.Locked(FnTypeTable).init(loop, FnTypeTable.init(loop.allocator)),
+ .c_int_types = undefined,
+
+ .meta_type = undefined,
+ .void_type = undefined,
+ .void_value = undefined,
+ .bool_type = undefined,
+ .true_value = undefined,
+ .false_value = undefined,
+ .noreturn_type = undefined,
+ .noreturn_value = undefined,
+ .comptime_int_type = undefined,
+ .u8_type = undefined,
+
+ .target_machine = undefined,
+ .target_data_ref = undefined,
+ .target_layout_str = undefined,
+ .target_ptr_bits = target.getArchPtrBitWidth(),
+
+ .root_package = undefined,
+ .std_package = undefined,
+
+ .override_libc = null,
+ .destroy_handle = undefined,
+ .have_err_ret_tracing = false,
+ .primitive_type_table = undefined,
+ });
+ errdefer {
+ comp.int_type_table.private_data.deinit();
+ comp.array_type_table.private_data.deinit();
+ comp.ptr_type_table.private_data.deinit();
+ comp.fn_type_table.private_data.deinit();
+ comp.arena_allocator.deinit();
+ comp.loop.allocator.destroy(comp);
+ }
+
+ comp.name = try Buffer.init(comp.arena(), name);
+ comp.llvm_triple = try target.getTriple(comp.arena());
+ comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple);
+ comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
+ comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std");
+ comp.primitive_type_table = TypeTable.init(comp.arena());
+
+ const opt_level = switch (build_mode) {
+ builtin.Mode.Debug => llvm.CodeGenLevelNone,
+ else => llvm.CodeGenLevelAggressive,
+ };
+
+ const reloc_mode = if (is_static) llvm.RelocStatic else llvm.RelocPIC;
+
+ // LLVM creates invalid binaries on Windows sometimes.
+ // See https://github.com/ziglang/zig/issues/508
+ // As a workaround we do not use target native features on Windows.
+ var target_specific_cpu_args: ?[*]u8 = null;
+ var target_specific_cpu_features: ?[*]u8 = null;
+ errdefer llvm.DisposeMessage(target_specific_cpu_args);
+ errdefer llvm.DisposeMessage(target_specific_cpu_features);
+ if (target == Target.Native and !target.isWindows()) {
+ target_specific_cpu_args = llvm.GetHostCPUName() orelse return error.OutOfMemory;
+ target_specific_cpu_features = llvm.GetNativeFeatures() orelse return error.OutOfMemory;
+ }
+
+ comp.target_machine = llvm.CreateTargetMachine(
+ comp.llvm_target,
+ comp.llvm_triple.ptr(),
+ target_specific_cpu_args orelse c"",
+ target_specific_cpu_features orelse c"",
+ opt_level,
+ reloc_mode,
+ llvm.CodeModelDefault,
+ ) orelse return error.OutOfMemory;
+ errdefer llvm.DisposeTargetMachine(comp.target_machine);
+
+ comp.target_data_ref = llvm.CreateTargetDataLayout(comp.target_machine) orelse return error.OutOfMemory;
+ errdefer llvm.DisposeTargetData(comp.target_data_ref);
+
+ comp.target_layout_str = llvm.CopyStringRepOfTargetData(comp.target_data_ref) orelse return error.OutOfMemory;
+ errdefer llvm.DisposeMessage(comp.target_layout_str);
+
+ comp.events = try event.Channel(Event).create(comp.loop, 0);
+ errdefer comp.events.destroy();
+
+ if (root_src_path) |root_src| {
+ const dirname = std.os.path.dirname(root_src) orelse ".";
+ const basename = std.os.path.basename(root_src);
+
+ comp.root_package = try Package.create(comp.arena(), dirname, basename);
+ comp.std_package = try Package.create(comp.arena(), comp.zig_std_dir, "index.zig");
+ try comp.root_package.add("std", comp.std_package);
+ } else {
+ comp.root_package = try Package.create(comp.arena(), ".", "");
+ }
+
+ try comp.initTypes();
+
+ comp.destroy_handle = try async<loop.allocator> comp.internalDeinit();
+
+ return comp;
+ }
+
+ /// it does ref the result because it could be an arbitrary integer size
+ pub async fn getPrimitiveType(comp: *Compilation, name: []const u8) !?*Type {
+ if (name.len >= 2) {
+ switch (name[0]) {
+ 'i', 'u' => blk: {
+ for (name[1..]) |byte|
+ switch (byte) {
+ '0'...'9' => {},
+ else => break :blk,
+ };
+ const is_signed = name[0] == 'i';
+ const bit_count = std.fmt.parseUnsigned(u32, name[1..], 10) catch |err| switch (err) {
+ error.Overflow => return error.Overflow,
+ error.InvalidCharacter => unreachable, // we just checked the characters above
+ };
+ const int_type = try await (async Type.Int.get(comp, Type.Int.Key{
+ .bit_count = bit_count,
+ .is_signed = is_signed,
+ }) catch unreachable);
+ errdefer int_type.base.base.deref();
+ return &int_type.base;
+ },
+ else => {},
+ }
+ }
+
+ if (comp.primitive_type_table.get(name)) |entry| {
+ entry.value.base.ref();
+ return entry.value;
+ }
+
+ return null;
+ }
+
+ fn initTypes(comp: *Compilation) !void {
+ comp.meta_type = try comp.arena().create(Type.MetaType{
+ .base = Type{
+ .name = "type",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = undefined,
+ .ref_count = std.atomic.Int(usize).init(3), // 3 because it references itself twice
+ },
+ .id = builtin.TypeId.Type,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ .value = undefined,
+ });
+ comp.meta_type.value = &comp.meta_type.base;
+ comp.meta_type.base.base.typ = &comp.meta_type.base;
+ assert((try comp.primitive_type_table.put(comp.meta_type.base.name, &comp.meta_type.base)) == null);
+
+ comp.void_type = try comp.arena().create(Type.Void{
+ .base = Type{
+ .name = "void",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Void,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.void_type.base.name, &comp.void_type.base)) == null);
+
+ comp.noreturn_type = try comp.arena().create(Type.NoReturn{
+ .base = Type{
+ .name = "noreturn",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.NoReturn,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.noreturn_type.base.name, &comp.noreturn_type.base)) == null);
+
+ comp.comptime_int_type = try comp.arena().create(Type.ComptimeInt{
+ .base = Type{
+ .name = "comptime_int",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.ComptimeInt,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.comptime_int_type.base.name, &comp.comptime_int_type.base)) == null);
+
+ comp.bool_type = try comp.arena().create(Type.Bool{
+ .base = Type{
+ .name = "bool",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Bool,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.bool_type.base.name, &comp.bool_type.base)) == null);
+
+ comp.void_value = try comp.arena().create(Value.Void{
+ .base = Value{
+ .id = Value.Id.Void,
+ .typ = &Type.Void.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ });
+
+ comp.true_value = try comp.arena().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typ = &Type.Bool.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .x = true,
+ });
+
+ comp.false_value = try comp.arena().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typ = &Type.Bool.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .x = false,
+ });
+
+ comp.noreturn_value = try comp.arena().create(Value.NoReturn{
+ .base = Value{
+ .id = Value.Id.NoReturn,
+ .typ = &Type.NoReturn.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ });
+
+ for (CInt.list) |cint, i| {
+ const c_int_type = try comp.arena().create(Type.Int{
+ .base = Type{
+ .name = cint.zig_name,
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Int,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ .key = Type.Int.Key{
+ .is_signed = cint.is_signed,
+ .bit_count = comp.target.cIntTypeSizeInBits(cint.id),
+ },
+ .garbage_node = undefined,
+ });
+ comp.c_int_types[i] = c_int_type;
+ assert((try comp.primitive_type_table.put(cint.zig_name, &c_int_type.base)) == null);
+ }
+ comp.u8_type = try comp.arena().create(Type.Int{
+ .base = Type{
+ .name = "u8",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Int,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ .key = Type.Int.Key{
+ .is_signed = false,
+ .bit_count = 8,
+ },
+ .garbage_node = undefined,
+ });
+ assert((try comp.primitive_type_table.put(comp.u8_type.base.name, &comp.u8_type.base)) == null);
+ }
+
+ /// This function can safely use async/await, because it manages Compilation's lifetime,
+ /// and EventLoopLocal.deinit will not be called until the event.Loop.run() completes.
+ async fn internalDeinit(self: *Compilation) void {
+ suspend;
+
+ await (async self.deinit_group.wait() catch unreachable);
+ if (self.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
+ // TODO evented I/O?
+ os.deleteTree(self.arena(), tmp_dir) catch {};
+ } else |_| {};
+
+ self.events.destroy();
+
+ llvm.DisposeMessage(self.target_layout_str);
+ llvm.DisposeTargetData(self.target_data_ref);
+ llvm.DisposeTargetMachine(self.target_machine);
+
+ self.primitive_type_table.deinit();
+
+ self.arena_allocator.deinit();
+ self.gpa().destroy(self);
+ }
+
+ pub fn destroy(self: *Compilation) void {
+ resume self.destroy_handle;
+ }
+
+ pub fn build(self: *Compilation) !void {
+ if (self.llvm_argv.len != 0) {
+ var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.arena(), [][]const []const u8{
+ [][]const u8{"zig (LLVM option parsing)"},
+ self.llvm_argv,
+ });
+ defer c_compatible_args.deinit();
+ // TODO this sets global state
+ c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
+ }
+
+ _ = try async<self.gpa()> self.buildAsync();
+ }
+
+ async fn buildAsync(self: *Compilation) void {
+ while (true) {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_result = await (async self.compileAndLink() catch unreachable);
+
+ // this makes a handy error return trace and stack trace in debug mode
+ if (std.debug.runtime_safety) {
+ build_result catch unreachable;
+ }
+
+ const compile_errors = blk: {
+ const held = await (async self.compile_errors.acquire() catch unreachable);
+ defer held.release();
+ break :blk held.value.toOwnedSlice();
+ };
+
+ if (build_result) |_| {
+ if (compile_errors.len == 0) {
+ await (async self.events.put(Event.Ok) catch unreachable);
+ } else {
+ await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
+ }
+ } else |err| {
+ // if there's an error then the compile errors have dangling references
+ self.gpa().free(compile_errors);
+
+ await (async self.events.put(Event{ .Error = err }) catch unreachable);
+ }
+
+ // for now we stop after 1
+ return;
+ }
+ }
+
+ async fn compileAndLink(self: *Compilation) !void {
+ if (self.root_src_path) |root_src_path| {
+ // TODO async/await os.path.real
+ const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| {
+ try printError("unable to get real path '{}': {}", root_src_path, err);
+ return err;
+ };
+ const root_scope = blk: {
+ errdefer self.gpa().free(root_src_real_path);
+
+ // TODO async/await readFileAlloc()
+ const source_code = io.readFileAlloc(self.gpa(), root_src_real_path) catch |err| {
+ try printError("unable to open '{}': {}", root_src_real_path, err);
+ return err;
+ };
+ errdefer self.gpa().free(source_code);
+
+ const tree = try self.gpa().createOne(ast.Tree);
+ tree.* = try std.zig.parse(self.gpa(), source_code);
+ errdefer {
+ tree.deinit();
+ self.gpa().destroy(tree);
+ }
+
+ break :blk try Scope.Root.create(self, tree, root_src_real_path);
+ };
+ defer root_scope.base.deref(self);
+ const tree = root_scope.tree;
+
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try Msg.createFromParseErrorAndScope(self, root_scope, parse_error);
+ errdefer msg.destroy();
+
+ try await (async self.addCompileErrorAsync(msg) catch unreachable);
+ }
+ if (tree.errors.len != 0) {
+ return;
+ }
+
+ const decls = try Scope.Decls.create(self, &root_scope.base);
+ defer decls.base.deref(self);
+
+ var decl_group = event.Group(BuildError!void).init(self.loop);
+ var decl_group_consumed = false;
+ errdefer if (!decl_group_consumed) decl_group.cancelAll();
+
+ var it = tree.root_node.decls.iterator(0);
+ while (it.next()) |decl_ptr| {
+ const decl = decl_ptr.*;
+ switch (decl.id) {
+ ast.Node.Id.Comptime => {
+ const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", decl);
+
+ try self.prelink_group.call(addCompTimeBlock, self, &decls.base, comptime_node);
+ },
+ ast.Node.Id.VarDecl => @panic("TODO"),
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
+
+ const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
+ try self.addCompileError(root_scope, Span{
+ .first = fn_proto.fn_token,
+ .last = fn_proto.fn_token + 1,
+ }, "missing function name");
+ continue;
+ };
+
+ const fn_decl = try self.gpa().create(Decl.Fn{
+ .base = Decl{
+ .id = Decl.Id.Fn,
+ .name = name,
+ .visib = parseVisibToken(tree, fn_proto.visib_token),
+ .resolution = event.Future(BuildError!void).init(self.loop),
+ .parent_scope = &decls.base,
+ },
+ .value = Decl.Fn.Val{ .Unresolved = {} },
+ .fn_proto = fn_proto,
+ });
+ errdefer self.gpa().destroy(fn_decl);
+
+ try decl_group.call(addTopLevelDecl, self, decls, &fn_decl.base);
+ },
+ ast.Node.Id.TestDecl => @panic("TODO"),
+ else => unreachable,
+ }
+ }
+ decl_group_consumed = true;
+ try await (async decl_group.wait() catch unreachable);
+
+ // Now other code can rely on the decls scope having a complete list of names.
+ decls.name_future.resolve();
+ }
+
+ (await (async self.prelink_group.wait() catch unreachable)) catch |err| switch (err) {
+ error.SemanticAnalysisFailed => {},
+ else => return err,
+ };
+
+ const any_prelink_errors = blk: {
+ const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
+ defer compile_errors.release();
+
+ break :blk compile_errors.value.len != 0;
+ };
+
+ if (!any_prelink_errors) {
+ try await (async link(self) catch unreachable);
+ }
+ }
+
+ /// caller takes ownership of resulting Code
+ async fn genAndAnalyzeCode(
+ comp: *Compilation,
+ scope: *Scope,
+ node: *ast.Node,
+ expected_type: ?*Type,
+ ) !*ir.Code {
+ const unanalyzed_code = try await (async ir.gen(
+ comp,
+ node,
+ scope,
+ ) catch unreachable);
+ defer unanalyzed_code.destroy(comp.gpa());
+
+ if (comp.verbose_ir) {
+ std.debug.warn("unanalyzed:\n");
+ unanalyzed_code.dump();
+ }
+
+ const analyzed_code = try await (async ir.analyze(
+ comp,
+ unanalyzed_code,
+ expected_type,
+ ) catch unreachable);
+ errdefer analyzed_code.destroy(comp.gpa());
+
+ if (comp.verbose_ir) {
+ std.debug.warn("analyzed:\n");
+ analyzed_code.dump();
+ }
+
+ return analyzed_code;
+ }
+
+ async fn addCompTimeBlock(
+ comp: *Compilation,
+ scope: *Scope,
+ comptime_node: *ast.Node.Comptime,
+ ) !void {
+ const void_type = Type.Void.get(comp);
+ defer void_type.base.base.deref(comp);
+
+ const analyzed_code = (await (async genAndAnalyzeCode(
+ comp,
+ scope,
+ comptime_node.expr,
+ &void_type.base,
+ ) catch unreachable)) catch |err| switch (err) {
+ // This poison value should not cause the errdefers to run. It simply means
+ // that comp.compile_errors is populated.
+ error.SemanticAnalysisFailed => return {},
+ else => return err,
+ };
+ analyzed_code.destroy(comp.gpa());
+ }
+
+ async fn addTopLevelDecl(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
+ const tree = decl.findRootScope().tree;
+ const is_export = decl.isExported(tree);
+
+ var add_to_table_resolved = false;
+ const add_to_table = async self.addDeclToTable(decls, decl) catch unreachable;
+ errdefer if (!add_to_table_resolved) cancel add_to_table; // TODO https://github.com/ziglang/zig/issues/1261
+
+ if (is_export) {
+ try self.prelink_group.call(verifyUniqueSymbol, self, decl);
+ try self.prelink_group.call(resolveDecl, self, decl);
+ }
+
+ add_to_table_resolved = true;
+ try await add_to_table;
+ }
+
+ async fn addDeclToTable(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
+ const held = await (async decls.table.acquire() catch unreachable);
+ defer held.release();
+
+ if (try held.value.put(decl.name, decl)) |other_decl| {
+ try self.addCompileError(decls.base.findRoot(), decl.getSpan(), "redefinition of '{}'", decl.name);
+ // TODO note: other definition here
+ }
+ }
+
+ fn addCompileError(self: *Compilation, root: *Scope.Root, span: Span, comptime fmt: []const u8, args: ...) !void {
+ const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
+ errdefer self.gpa().free(text);
+
+ const msg = try Msg.createFromScope(self, root, span, text);
+ errdefer msg.destroy();
+
+ try self.prelink_group.call(addCompileErrorAsync, self, msg);
+ }
+
+ async fn addCompileErrorAsync(
+ self: *Compilation,
+ msg: *Msg,
+ ) !void {
+ errdefer msg.destroy();
+
+ const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
+ defer compile_errors.release();
+
+ try compile_errors.value.append(msg);
+ }
+
+ async fn verifyUniqueSymbol(self: *Compilation, decl: *Decl) !void {
+ const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
+ defer exported_symbol_names.release();
+
+ if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
+ try self.addCompileError(
+ decl.findRootScope(),
+ decl.getSpan(),
+ "exported symbol collision: '{}'",
+ decl.name,
+ );
+ // TODO add error note showing location of other symbol
+ }
+ }
+
+ pub fn haveLibC(self: *Compilation) bool {
+ return self.libc_link_lib != null;
+ }
+
+ pub fn addLinkLib(self: *Compilation, name: []const u8, provided_explicitly: bool) !*LinkLib {
+ const is_libc = mem.eql(u8, name, "c");
+
+ if (is_libc) {
+ if (self.libc_link_lib) |libc_link_lib| {
+ return libc_link_lib;
+ }
+ }
+
+ for (self.link_libs_list.toSliceConst()) |existing_lib| {
+ if (mem.eql(u8, name, existing_lib.name)) {
+ return existing_lib;
+ }
+ }
+
+ const link_lib = try self.gpa().create(LinkLib{
+ .name = name,
+ .path = null,
+ .provided_explicitly = provided_explicitly,
+ .symbols = ArrayList([]u8).init(self.gpa()),
+ });
+ try self.link_libs_list.append(link_lib);
+ if (is_libc) {
+ self.libc_link_lib = link_lib;
+
+ // get a head start on looking for the native libc
+ if (self.target == Target.Native and self.override_libc == null) {
+ try self.deinit_group.call(startFindingNativeLibC, self);
+ }
+ }
+ return link_lib;
+ }
+
+ /// cancels itself so no need to await or cancel the promise.
+ async fn startFindingNativeLibC(self: *Compilation) void {
+ await (async self.loop.yield() catch unreachable);
+ // we don't care if it fails, we're just trying to kick off the future resolution
+ _ = (await (async self.event_loop_local.getNativeLibC() catch unreachable)) catch return;
+ }
+
+ /// General Purpose Allocator. Must free when done.
+ fn gpa(self: Compilation) *mem.Allocator {
+ return self.loop.allocator;
+ }
+
+ /// Arena Allocator. Automatically freed when the Compilation is destroyed.
+ fn arena(self: *Compilation) *mem.Allocator {
+ return &self.arena_allocator.allocator;
+ }
+
+ /// If the temporary directory for this compilation has not been created, it creates it.
+ /// Then it creates a random file name in that dir and returns it.
+ pub async fn createRandomOutputPath(self: *Compilation, suffix: []const u8) !Buffer {
+ const tmp_dir = try await (async self.getTmpDir() catch unreachable);
+ const file_prefix = await (async self.getRandomFileName() catch unreachable);
+
+ const file_name = try std.fmt.allocPrint(self.gpa(), "{}{}", file_prefix[0..], suffix);
+ defer self.gpa().free(file_name);
+
+ const full_path = try os.path.join(self.gpa(), tmp_dir, file_name[0..]);
+ errdefer self.gpa().free(full_path);
+
+ return Buffer.fromOwnedSlice(self.gpa(), full_path);
+ }
+
+ /// If the temporary directory for this Compilation has not been created, creates it.
+ /// Then returns it. The directory is unique to this Compilation and cleaned up when
+ /// the Compilation deinitializes.
+ async fn getTmpDir(self: *Compilation) ![]const u8 {
+ if (await (async self.tmp_dir.start() catch unreachable)) |ptr| return ptr.*;
+ self.tmp_dir.data = await (async self.getTmpDirImpl() catch unreachable);
+ self.tmp_dir.resolve();
+ return self.tmp_dir.data;
+ }
+
+ async fn getTmpDirImpl(self: *Compilation) ![]u8 {
+ const comp_dir_name = await (async self.getRandomFileName() catch unreachable);
+ const zig_dir_path = try getZigDir(self.gpa());
+ defer self.gpa().free(zig_dir_path);
+
+ const tmp_dir = try os.path.join(self.arena(), zig_dir_path, comp_dir_name[0..]);
+ try os.makePath(self.gpa(), tmp_dir);
+ return tmp_dir;
+ }
+
+ async fn getRandomFileName(self: *Compilation) [12]u8 {
+ // here we replace the standard +/ with -_ so that it can be used in a file name
+ const b64_fs_encoder = std.base64.Base64Encoder.init(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_",
+ std.base64.standard_pad_char,
+ );
+
+ var rand_bytes: [9]u8 = undefined;
+
+ {
+ const held = await (async self.event_loop_local.prng.acquire() catch unreachable);
+ defer held.release();
+
+ held.value.random.bytes(rand_bytes[0..]);
+ }
+
+ var result: [12]u8 = undefined;
+ b64_fs_encoder.encode(result[0..], rand_bytes);
+ return result;
+ }
+
+ fn registerGarbage(comp: *Compilation, comptime T: type, node: *std.atomic.Stack(*T).Node) void {
+ // TODO put the garbage somewhere
+ }
+
+ /// Returns a value which has been ref()'d once
+ async fn analyzeConstValue(comp: *Compilation, scope: *Scope, node: *ast.Node, expected_type: *Type) !*Value {
+ const analyzed_code = try await (async comp.genAndAnalyzeCode(scope, node, expected_type) catch unreachable);
+ defer analyzed_code.destroy(comp.gpa());
+
+ return analyzed_code.getCompTimeResult(comp);
+ }
+
+ async fn analyzeTypeExpr(comp: *Compilation, scope: *Scope, node: *ast.Node) !*Type {
+ const meta_type = &Type.MetaType.get(comp).base;
+ defer meta_type.base.deref(comp);
+
+ const result_val = try await (async comp.analyzeConstValue(scope, node, meta_type) catch unreachable);
+ errdefer result_val.base.deref(comp);
+
+ return result_val.cast(Type).?;
+ }
+
+ /// This declaration has been blessed as going into the final code generation.
+ pub async fn resolveDecl(comp: *Compilation, decl: *Decl) !void {
+ if (await (async decl.resolution.start() catch unreachable)) |ptr| return ptr.*;
+
+ decl.resolution.data = try await (async generateDecl(comp, decl) catch unreachable);
+ decl.resolution.resolve();
+ return decl.resolution.data;
+ }
+};
+
+fn printError(comptime format: []const u8, args: ...) !void {
+ var stderr_file = try std.io.getStdErr();
+ var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
+ const out_stream = &stderr_file_out_stream.stream;
+ try out_stream.print(format, args);
+}
+
+fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
+ if (optional_token_index) |token_index| {
+ const token = tree.tokens.at(token_index);
+ assert(token.id == Token.Id.Keyword_pub);
+ return Visib.Pub;
+ } else {
+ return Visib.Private;
+ }
+}
+
+/// The function that actually does the generation.
+async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
+ switch (decl.id) {
+ Decl.Id.Var => @panic("TODO"),
+ Decl.Id.Fn => {
+ const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
+ return await (async generateDeclFn(comp, fn_decl) catch unreachable);
+ },
+ Decl.Id.CompTime => @panic("TODO"),
+ }
+}
+
+async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+ const body_node = fn_decl.fn_proto.body_node orelse return await (async generateDeclFnProto(comp, fn_decl) catch unreachable);
+
+ const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
+ defer fndef_scope.base.deref(comp);
+
+ const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+ defer fn_type.base.base.deref(comp);
+
+ var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
+ var symbol_name_consumed = false;
+ errdefer if (!symbol_name_consumed) symbol_name.deinit();
+
+ // The Decl.Fn owns the initial 1 reference count
+ const fn_val = try Value.Fn.create(comp, fn_type, fndef_scope, symbol_name);
+ fn_decl.value = Decl.Fn.Val{ .Fn = fn_val };
+ symbol_name_consumed = true;
+
+ // Define local parameter variables
+ const root_scope = fn_decl.base.findRootScope();
+ for (fn_type.key.data.Normal.params) |param, i| {
+ //AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
+ const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*);
+ const name_token = param_decl.name_token orelse {
+ try comp.addCompileError(root_scope, Span{
+ .first = param_decl.firstToken(),
+ .last = param_decl.type_node.firstToken(),
+ }, "missing parameter name");
+ return error.SemanticAnalysisFailed;
+ };
+ const param_name = root_scope.tree.tokenSlice(name_token);
+
+ // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
+ // add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
+ // }
+
+ // TODO check for shadowing
+
+ const var_scope = try Scope.Var.createParam(
+ comp,
+ fn_val.child_scope,
+ param_name,
+ &param_decl.base,
+ i,
+ param.typ,
+ );
+ fn_val.child_scope = &var_scope.base;
+
+ try fn_type.non_key.Normal.variable_list.append(var_scope);
+ }
+
+ const analyzed_code = try await (async comp.genAndAnalyzeCode(
+ fn_val.child_scope,
+ body_node,
+ fn_type.key.data.Normal.return_type,
+ ) catch unreachable);
+ errdefer analyzed_code.destroy(comp.gpa());
+
+ assert(fn_val.block_scope != null);
+
+ // Kick off rendering to LLVM module, but it doesn't block the fn decl
+ // analysis from being complete.
+ try comp.prelink_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code);
+ try comp.prelink_group.call(addFnToLinkSet, comp, fn_val);
+}
+
+async fn addFnToLinkSet(comp: *Compilation, fn_val: *Value.Fn) void {
+ fn_val.base.ref();
+ defer fn_val.base.deref(comp);
+
+ fn_val.link_set_node.data = fn_val;
+
+ const held = await (async comp.fn_link_set.acquire() catch unreachable);
+ defer held.release();
+
+ held.value.append(fn_val.link_set_node);
+}
+
+fn getZigDir(allocator: *mem.Allocator) ![]u8 {
+ return os.getAppDataDir(allocator, "zig");
+}
+
+async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.FnProto) !*Type.Fn {
+ const return_type_node = switch (fn_proto.return_type) {
+ ast.Node.FnProto.ReturnType.Explicit => |n| n,
+ ast.Node.FnProto.ReturnType.InferErrorSet => |n| n,
+ };
+ const return_type = try await (async comp.analyzeTypeExpr(scope, return_type_node) catch unreachable);
+ return_type.base.deref(comp);
+
+ var params = ArrayList(Type.Fn.Param).init(comp.gpa());
+ var params_consumed = false;
+ defer if (!params_consumed) {
+ for (params.toSliceConst()) |param| {
+ param.typ.base.deref(comp);
+ }
+ params.deinit();
+ };
+
+ {
+ var it = fn_proto.params.iterator(0);
+ while (it.next()) |param_node_ptr| {
+ const param_node = param_node_ptr.*.cast(ast.Node.ParamDecl).?;
+ const param_type = try await (async comp.analyzeTypeExpr(scope, param_node.type_node) catch unreachable);
+ errdefer param_type.base.deref(comp);
+ try params.append(Type.Fn.Param{
+ .typ = param_type,
+ .is_noalias = param_node.noalias_token != null,
+ });
+ }
+ }
+
+ const key = Type.Fn.Key{
+ .alignment = null,
+ .data = Type.Fn.Key.Data{
+ .Normal = Type.Fn.Key.Normal{
+ .return_type = return_type,
+ .params = params.toOwnedSlice(),
+ .is_var_args = false, // TODO
+ .cc = Type.Fn.CallingConvention.Auto, // TODO
+ },
+ },
+ };
+ params_consumed = true;
+ var key_consumed = false;
+ defer if (!key_consumed) {
+ for (key.data.Normal.params) |param| {
+ param.typ.base.deref(comp);
+ }
+ comp.gpa().free(key.data.Normal.params);
+ };
+
+ const fn_type = try await (async Type.Fn.get(comp, key) catch unreachable);
+ key_consumed = true;
+ errdefer fn_type.base.base.deref(comp);
+
+ return fn_type;
+}
+
+async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+ const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+ defer fn_type.base.base.deref(comp);
+
+ var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
+ var symbol_name_consumed = false;
+ defer if (!symbol_name_consumed) symbol_name.deinit();
+
+ // The Decl.Fn owns the initial 1 reference count
+ const fn_proto_val = try Value.FnProto.create(comp, fn_type, symbol_name);
+ fn_decl.value = Decl.Fn.Val{ .FnProto = fn_proto_val };
+ symbol_name_consumed = true;
+}
diff --git a/src-self-hosted/decl.zig b/src-self-hosted/decl.zig
new file mode 100644
index 0000000000..6e80243038
--- /dev/null
+++ b/src-self-hosted/decl.zig
@@ -0,0 +1,98 @@
+const std = @import("std");
+const Allocator = mem.Allocator;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Visib = @import("visib.zig").Visib;
+const event = std.event;
+const Value = @import("value.zig").Value;
+const Token = std.zig.Token;
+const errmsg = @import("errmsg.zig");
+const Scope = @import("scope.zig").Scope;
+const Compilation = @import("compilation.zig").Compilation;
+
+pub const Decl = struct {
+ id: Id,
+ name: []const u8,
+ visib: Visib,
+ resolution: event.Future(Compilation.BuildError!void),
+ parent_scope: *Scope,
+
+ pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ return fn_decl.isExported(tree);
+ },
+ else => return false,
+ }
+ }
+
+ pub fn getSpan(base: *const Decl) errmsg.Span {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ const fn_proto = fn_decl.fn_proto;
+ const start = fn_proto.fn_token;
+ const end = fn_proto.name_token orelse start;
+ return errmsg.Span{
+ .first = start,
+ .last = end + 1,
+ };
+ },
+ else => @panic("TODO"),
+ }
+ }
+
+ pub fn findRootScope(base: *const Decl) *Scope.Root {
+ return base.parent_scope.findRoot();
+ }
+
+ pub const Id = enum {
+ Var,
+ Fn,
+ CompTime,
+ };
+
+ pub const Var = struct {
+ base: Decl,
+ };
+
+ pub const Fn = struct {
+ base: Decl,
+ value: Val,
+ fn_proto: *ast.Node.FnProto,
+
+ // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
+ pub const Val = union(enum) {
+ Unresolved: void,
+ Fn: *Value.Fn,
+ FnProto: *Value.FnProto,
+ };
+
+ pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
+ return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
+ const token = tree.tokens.at(tok_index);
+ break :x switch (token.id) {
+ Token.Id.Extern => tree.tokenSlicePtr(token),
+ else => null,
+ };
+ } else null;
+ }
+
+ pub fn isExported(self: Fn, tree: *ast.Tree) bool {
+ if (self.fn_proto.extern_export_inline_token) |tok_index| {
+ const token = tree.tokens.at(tok_index);
+ return token.id == Token.Id.Keyword_export;
+ } else {
+ return false;
+ }
+ }
+ };
+
+ pub const CompTime = struct {
+ base: Decl,
+ };
+};
+
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
new file mode 100644
index 0000000000..51e135686a
--- /dev/null
+++ b/src-self-hosted/errmsg.zig
@@ -0,0 +1,237 @@
+const std = @import("std");
+const mem = std.mem;
+const os = std.os;
+const Token = std.zig.Token;
+const ast = std.zig.ast;
+const TokenIndex = std.zig.ast.TokenIndex;
+const Compilation = @import("compilation.zig").Compilation;
+const Scope = @import("scope.zig").Scope;
+
+pub const Color = enum {
+ Auto,
+ Off,
+ On,
+};
+
+pub const Span = struct {
+ first: ast.TokenIndex,
+ last: ast.TokenIndex,
+
+ pub fn token(i: TokenIndex) Span {
+ return Span{
+ .first = i,
+ .last = i,
+ };
+ }
+
+ pub fn node(n: *ast.Node) Span {
+ return Span{
+ .first = n.firstToken(),
+ .last = n.lastToken(),
+ };
+ }
+};
+
+pub const Msg = struct {
+ span: Span,
+ text: []u8,
+ data: Data,
+
+ const Data = union(enum) {
+ PathAndTree: PathAndTree,
+ ScopeAndComp: ScopeAndComp,
+ };
+
+ const PathAndTree = struct {
+ realpath: []const u8,
+ tree: *ast.Tree,
+ allocator: *mem.Allocator,
+ };
+
+ const ScopeAndComp = struct {
+ root_scope: *Scope.Root,
+ compilation: *Compilation,
+ };
+
+ pub fn destroy(self: *Msg) void {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ path_and_tree.allocator.free(self.text);
+ path_and_tree.allocator.destroy(self);
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ scope_and_comp.root_scope.base.deref(scope_and_comp.compilation);
+ scope_and_comp.compilation.gpa().free(self.text);
+ scope_and_comp.compilation.gpa().destroy(self);
+ },
+ }
+ }
+
+ fn getAllocator(self: *const Msg) *mem.Allocator {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ return path_and_tree.allocator;
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ return scope_and_comp.compilation.gpa();
+ },
+ }
+ }
+
+ pub fn getRealPath(self: *const Msg) []const u8 {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ return path_and_tree.realpath;
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ return scope_and_comp.root_scope.realpath;
+ },
+ }
+ }
+
+ pub fn getTree(self: *const Msg) *ast.Tree {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ return path_and_tree.tree;
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ return scope_and_comp.root_scope.tree;
+ },
+ }
+ }
+
+ /// Takes ownership of text
+ /// References root_scope, and derefs when the msg is freed
+ pub fn createFromScope(comp: *Compilation, root_scope: *Scope.Root, span: Span, text: []u8) !*Msg {
+ const msg = try comp.gpa().create(Msg{
+ .text = text,
+ .span = span,
+ .data = Data{
+ .ScopeAndComp = ScopeAndComp{
+ .root_scope = root_scope,
+ .compilation = comp,
+ },
+ },
+ });
+ root_scope.base.ref();
+ return msg;
+ }
+
+ pub fn createFromParseErrorAndScope(
+ comp: *Compilation,
+ root_scope: *Scope.Root,
+ parse_error: *const ast.Error,
+ ) !*Msg {
+ const loc_token = parse_error.loc();
+ var text_buf = try std.Buffer.initSize(comp.gpa(), 0);
+ defer text_buf.deinit();
+
+ var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+ try parse_error.render(&root_scope.tree.tokens, out_stream);
+
+ const msg = try comp.gpa().create(Msg{
+ .text = undefined,
+ .span = Span{
+ .first = loc_token,
+ .last = loc_token,
+ },
+ .data = Data{
+ .ScopeAndComp = ScopeAndComp{
+ .root_scope = root_scope,
+ .compilation = comp,
+ },
+ },
+ });
+ root_scope.base.ref();
+ msg.text = text_buf.toOwnedSlice();
+ return msg;
+ }
+
+ /// `realpath` must outlive the returned Msg
+ /// `tree` must outlive the returned Msg
+ /// Caller owns returned Msg and must free with `allocator`
+ /// allocator will additionally be used for printing messages later.
+ pub fn createFromParseError(
+ allocator: *mem.Allocator,
+ parse_error: *const ast.Error,
+ tree: *ast.Tree,
+ realpath: []const u8,
+ ) !*Msg {
+ const loc_token = parse_error.loc();
+ var text_buf = try std.Buffer.initSize(allocator, 0);
+ defer text_buf.deinit();
+
+ var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+ try parse_error.render(&tree.tokens, out_stream);
+
+ const msg = try allocator.create(Msg{
+ .text = undefined,
+ .data = Data{
+ .PathAndTree = PathAndTree{
+ .allocator = allocator,
+ .realpath = realpath,
+ .tree = tree,
+ },
+ },
+ .span = Span{
+ .first = loc_token,
+ .last = loc_token,
+ },
+ });
+ msg.text = text_buf.toOwnedSlice();
+ errdefer allocator.destroy(msg);
+
+ return msg;
+ }
+
+ pub fn printToStream(msg: *const Msg, stream: var, color_on: bool) !void {
+ const allocator = msg.getAllocator();
+ const realpath = msg.getRealPath();
+ const tree = msg.getTree();
+
+ const cwd = try os.getCwd(allocator);
+ defer allocator.free(cwd);
+
+ const relpath = try os.path.relative(allocator, cwd, realpath);
+ defer allocator.free(relpath);
+
+ const path = if (relpath.len < realpath.len) relpath else realpath;
+
+ const first_token = tree.tokens.at(msg.span.first);
+ const last_token = tree.tokens.at(msg.span.last);
+ const start_loc = tree.tokenLocationPtr(0, first_token);
+ const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
+ if (!color_on) {
+ try stream.print(
+ "{}:{}:{}: error: {}\n",
+ path,
+ start_loc.line + 1,
+ start_loc.column + 1,
+ msg.text,
+ );
+ return;
+ }
+
+ try stream.print(
+ "{}:{}:{}: error: {}\n{}\n",
+ path,
+ start_loc.line + 1,
+ start_loc.column + 1,
+ msg.text,
+ tree.source[start_loc.line_start..start_loc.line_end],
+ );
+ try stream.writeByteNTimes(' ', start_loc.column);
+ try stream.writeByteNTimes('~', last_token.end - first_token.start);
+ try stream.write("\n");
+ }
+
+ pub fn printToFile(msg: *const Msg, file: *os.File, color: Color) !void {
+ const color_on = switch (color) {
+ Color.Auto => file.isTty(),
+ Color.On => true,
+ Color.Off => false,
+ };
+ var stream = &std.io.FileOutStream.init(file).stream;
+ return msg.printToStream(stream, color_on);
+ }
+};
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 3f1fefdd5a..ecd04c4467 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -7,7 +7,7 @@ const os = std.os;
const warn = std.debug.warn;
/// Caller must free result
-pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![]u8 {
+pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
const test_zig_dir = try os.path.join(allocator, test_path, "lib", "zig");
errdefer allocator.free(test_zig_dir);
@@ -21,13 +21,13 @@ pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![
}
/// Caller must free result
-pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
const self_exe_path = try os.selfExeDirPath(allocator);
defer allocator.free(self_exe_path);
var cur_path: []const u8 = self_exe_path;
while (true) {
- const test_dir = os.path.dirname(cur_path);
+ const test_dir = os.path.dirname(cur_path) orelse ".";
if (mem.eql(u8, test_dir, cur_path)) {
break;
@@ -42,16 +42,19 @@ pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
return error.FileNotFound;
}
-pub fn resolveZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
warn(
\\Unable to find zig lib directory: {}.
\\Reinstall Zig or use --zig-install-prefix.
\\
- ,
- @errorName(err)
- );
+ , @errorName(err));
return error.ZigLibDirNotFound;
};
}
+
+/// Caller must free result
+pub fn resolveZigCacheDir(allocator: *mem.Allocator) ![]u8 {
+ return std.mem.dupe(allocator, u8, "zig-cache");
+}
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index b66a0abdee..619cd4f330 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -1,112 +1,2598 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Compilation = @import("compilation.zig").Compilation;
const Scope = @import("scope.zig").Scope;
+const ast = std.zig.ast;
+const Allocator = std.mem.Allocator;
+const Value = @import("value.zig").Value;
+const Type = Value.Type;
+const assert = std.debug.assert;
+const Token = std.zig.Token;
+const Span = @import("errmsg.zig").Span;
+const llvm = @import("llvm.zig");
+const codegen = @import("codegen.zig");
+const ObjectFile = codegen.ObjectFile;
+const Decl = @import("decl.zig").Decl;
+const mem = std.mem;
-pub const Instruction = struct {
+pub const LVal = enum {
+ None,
+ Ptr,
+};
+
+pub const IrVal = union(enum) {
+ Unknown,
+ KnownType: *Type,
+ KnownValue: *Value,
+
+ const Init = enum {
+ Unknown,
+ NoReturn,
+ Void,
+ };
+
+ pub fn dump(self: IrVal) void {
+ switch (self) {
+ IrVal.Unknown => std.debug.warn("Unknown"),
+ IrVal.KnownType => |typ| {
+ std.debug.warn("KnownType(");
+ typ.dump();
+ std.debug.warn(")");
+ },
+ IrVal.KnownValue => |value| {
+ std.debug.warn("KnownValue(");
+ value.dump();
+ std.debug.warn(")");
+ },
+ }
+ }
+};
+
+pub const Inst = struct {
id: Id,
- scope: &Scope,
+ scope: *Scope,
+ debug_id: usize,
+ val: IrVal,
+ ref_count: usize,
+ span: Span,
+ owner_bb: *BasicBlock,
+
+ /// true if this instruction was generated by zig and not from user code
+ is_generated: bool,
+
+ /// the instruction that is derived from this one in analysis
+ child: ?*Inst,
+
+ /// the instruction that this one derives from in analysis
+ parent: ?*Inst,
+
+    /// populated during codegen
+ llvm_value: ?llvm.ValueRef,
+
+ pub fn cast(base: *Inst, comptime T: type) ?*T {
+ if (base.id == comptime typeToId(T)) {
+ return @fieldParentPtr(T, "base", base);
+ }
+ return null;
+ }
+
+ pub fn typeToId(comptime T: type) Id {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (T == @field(Inst, @memberName(Id, i))) {
+ return @field(Id, @memberName(Id, i));
+ }
+ }
+ unreachable;
+ }
+
+ pub fn dump(base: *const Inst) void {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Inst, @memberName(Id, i));
+ std.debug.warn("#{} = {}(", base.debug_id, @tagName(base.id));
+ @fieldParentPtr(T, "base", base).dump();
+ std.debug.warn(")");
+ return;
+ }
+ }
+ unreachable;
+ }
+
+ pub fn hasSideEffects(base: *const Inst) bool {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Inst, @memberName(Id, i));
+ return @fieldParentPtr(T, "base", base).hasSideEffects();
+ }
+ }
+ unreachable;
+ }
+
+ pub async fn analyze(base: *Inst, ira: *Analyze) Analyze.Error!*Inst {
+ switch (base.id) {
+ Id.Return => return @fieldParentPtr(Return, "base", base).analyze(ira),
+ Id.Const => return @fieldParentPtr(Const, "base", base).analyze(ira),
+ Id.Call => return @fieldParentPtr(Call, "base", base).analyze(ira),
+ Id.DeclRef => return await (async @fieldParentPtr(DeclRef, "base", base).analyze(ira) catch unreachable),
+ Id.Ref => return await (async @fieldParentPtr(Ref, "base", base).analyze(ira) catch unreachable),
+ Id.DeclVar => return @fieldParentPtr(DeclVar, "base", base).analyze(ira),
+ Id.CheckVoidStmt => return @fieldParentPtr(CheckVoidStmt, "base", base).analyze(ira),
+ Id.Phi => return @fieldParentPtr(Phi, "base", base).analyze(ira),
+ Id.Br => return @fieldParentPtr(Br, "base", base).analyze(ira),
+ Id.AddImplicitReturnType => return @fieldParentPtr(AddImplicitReturnType, "base", base).analyze(ira),
+ Id.PtrType => return await (async @fieldParentPtr(PtrType, "base", base).analyze(ira) catch unreachable),
+ Id.VarPtr => return await (async @fieldParentPtr(VarPtr, "base", base).analyze(ira) catch unreachable),
+ Id.LoadPtr => return await (async @fieldParentPtr(LoadPtr, "base", base).analyze(ira) catch unreachable),
+ }
+ }
+
+ pub fn render(base: *Inst, ofile: *ObjectFile, fn_val: *Value.Fn) (error{OutOfMemory}!?llvm.ValueRef) {
+ switch (base.id) {
+ Id.Return => return @fieldParentPtr(Return, "base", base).render(ofile, fn_val),
+ Id.Const => return @fieldParentPtr(Const, "base", base).render(ofile, fn_val),
+ Id.Call => return @fieldParentPtr(Call, "base", base).render(ofile, fn_val),
+ Id.VarPtr => return @fieldParentPtr(VarPtr, "base", base).render(ofile, fn_val),
+ Id.LoadPtr => return @fieldParentPtr(LoadPtr, "base", base).render(ofile, fn_val),
+ Id.DeclRef => unreachable,
+ Id.PtrType => unreachable,
+ Id.Ref => @panic("TODO"),
+ Id.DeclVar => @panic("TODO"),
+ Id.CheckVoidStmt => @panic("TODO"),
+ Id.Phi => @panic("TODO"),
+ Id.Br => @panic("TODO"),
+ Id.AddImplicitReturnType => @panic("TODO"),
+ }
+ }
+
+ fn ref(base: *Inst, builder: *Builder) void {
+ base.ref_count += 1;
+ if (base.owner_bb != builder.current_basic_block and !base.isCompTime()) {
+ base.owner_bb.ref(builder);
+ }
+ }
+
+ fn copyVal(base: *Inst, comp: *Compilation) !*Value {
+ if (base.parent.?.ref_count == 0) {
+ return base.val.KnownValue.derefAndCopy(comp);
+ }
+ return base.val.KnownValue.copy(comp);
+ }
+
+ fn getAsParam(param: *Inst) !*Inst {
+ param.ref_count -= 1;
+ const child = param.child orelse return error.SemanticAnalysisFailed;
+ switch (child.val) {
+ IrVal.Unknown => return error.SemanticAnalysisFailed,
+ else => return child,
+ }
+ }
+
+ fn getConstVal(self: *Inst, ira: *Analyze) !*Value {
+ if (self.isCompTime()) {
+ return self.val.KnownValue;
+ } else {
+ try ira.addCompileError(self.span, "unable to evaluate constant expression");
+ return error.SemanticAnalysisFailed;
+ }
+ }
+
+ fn getAsConstType(param: *Inst, ira: *Analyze) !*Type {
+ const meta_type = Type.MetaType.get(ira.irb.comp);
+ meta_type.base.base.deref(ira.irb.comp);
+
+ const inst = try param.getAsParam();
+ const casted = try ira.implicitCast(inst, &meta_type.base);
+ const val = try casted.getConstVal(ira);
+ return val.cast(Value.Type).?;
+ }
+
+ fn getAsConstAlign(param: *Inst, ira: *Analyze) !u32 {
+ return error.Unimplemented;
+ //const align_type = Type.Int.get_align(ira.irb.comp);
+ //align_type.base.base.deref(ira.irb.comp);
+
+ //const inst = try param.getAsParam();
+ //const casted = try ira.implicitCast(inst, align_type);
+ //const val = try casted.getConstVal(ira);
+
+ //uint32_t align_bytes = bigint_as_unsigned(&const_val->data.x_bigint);
+ //if (align_bytes == 0) {
+ // ir_add_error(ira, value, buf_sprintf("alignment must be >= 1"));
+ // return false;
+ //}
+
+ //if (!is_power_of_2(align_bytes)) {
+ // ir_add_error(ira, value, buf_sprintf("alignment value %" PRIu32 " is not a power of 2", align_bytes));
+ // return false;
+ //}
+ }
+
+ /// asserts that the type is known
+ fn getKnownType(self: *Inst) *Type {
+ switch (self.val) {
+ IrVal.KnownType => |typ| return typ,
+ IrVal.KnownValue => |value| return value.typ,
+ IrVal.Unknown => unreachable,
+ }
+ }
+
+ pub fn setGenerated(base: *Inst) void {
+ base.is_generated = true;
+ }
+
+ pub fn isNoReturn(base: *const Inst) bool {
+ switch (base.val) {
+ IrVal.Unknown => return false,
+ IrVal.KnownValue => |x| return x.typ.id == Type.Id.NoReturn,
+ IrVal.KnownType => |typ| return typ.id == Type.Id.NoReturn,
+ }
+ }
+
+ pub fn isCompTime(base: *const Inst) bool {
+ return base.val == IrVal.KnownValue;
+ }
+
+ pub fn linkToParent(self: *Inst, parent: *Inst) void {
+ assert(self.parent == null);
+ assert(parent.child == null);
+ self.parent = parent;
+ parent.child = self;
+ }
pub const Id = enum {
- Br,
- CondBr,
- SwitchBr,
- SwitchVar,
- SwitchTarget,
- Phi,
- UnOp,
- BinOp,
- DeclVar,
- LoadPtr,
- StorePtr,
- FieldPtr,
- StructFieldPtr,
- UnionFieldPtr,
- ElemPtr,
- VarPtr,
- Call,
- Const,
Return,
- Cast,
- ContainerInitList,
- ContainerInitFields,
- StructInit,
- UnionInit,
- Unreachable,
- TypeOf,
- ToPtrType,
- PtrTypeChild,
- SetRuntimeSafety,
- SetFloatMode,
- ArrayType,
- SliceType,
- Asm,
- SizeOf,
- TestNonNull,
- UnwrapMaybe,
- MaybeWrap,
- UnionTag,
- Clz,
- Ctz,
- Import,
- CImport,
- CInclude,
- CDefine,
- CUndef,
- ArrayLen,
+ Const,
Ref,
- MinValue,
- MaxValue,
- CompileErr,
- CompileLog,
- ErrName,
- EmbedFile,
- Cmpxchg,
- Fence,
- Truncate,
- IntType,
- BoolNot,
- Memset,
- Memcpy,
- Slice,
- MemberCount,
- MemberType,
- MemberName,
- Breakpoint,
- ReturnAddress,
- FrameAddress,
- AlignOf,
- OverflowOp,
- TestErr,
- UnwrapErrCode,
- UnwrapErrPayload,
- ErrWrapCode,
- ErrWrapPayload,
- FnProto,
- TestComptime,
- PtrCast,
- BitCast,
- WidenOrShorten,
- IntToPtr,
- PtrToInt,
- IntToEnum,
- IntToErr,
- ErrToInt,
- CheckSwitchProngs,
- CheckStatementIsVoid,
- TypeName,
- CanImplicitCast,
+ DeclVar,
+ CheckVoidStmt,
+ Phi,
+ Br,
+ AddImplicitReturnType,
+ Call,
DeclRef,
- Panic,
- TagName,
- TagType,
- FieldParentPtr,
- OffsetOf,
- TypeId,
- SetEvalBranchQuota,
- PtrTypeOf,
- AlignCast,
- OpaqueType,
- SetAlignStack,
- ArgType,
- Export,
+ PtrType,
+ VarPtr,
+ LoadPtr,
+ };
+
+ pub const Call = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ fn_ref: *Inst,
+ args: []*Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(self: *const Call) void {
+ std.debug.warn("#{}(", self.params.fn_ref.debug_id);
+ for (self.params.args) |arg| {
+ std.debug.warn("#{},", arg.debug_id);
+ }
+ std.debug.warn(")");
+ }
+
+ pub fn hasSideEffects(self: *const Call) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const Call, ira: *Analyze) !*Inst {
+ const fn_ref = try self.params.fn_ref.getAsParam();
+ const fn_ref_type = fn_ref.getKnownType();
+ const fn_type = fn_ref_type.cast(Type.Fn) orelse {
+ try ira.addCompileError(fn_ref.span, "type '{}' not a function", fn_ref_type.name);
+ return error.SemanticAnalysisFailed;
+ };
+
+ const fn_type_param_count = fn_type.paramCount();
+
+ if (fn_type_param_count != self.params.args.len) {
+ try ira.addCompileError(
+ self.base.span,
+ "expected {} arguments, found {}",
+ fn_type_param_count,
+ self.params.args.len,
+ );
+ return error.SemanticAnalysisFailed;
+ }
+
+ const args = try ira.irb.arena().alloc(*Inst, self.params.args.len);
+ for (self.params.args) |arg, i| {
+ args[i] = try arg.getAsParam();
+ }
+ const new_inst = try ira.irb.build(Call, self.base.scope, self.base.span, Params{
+ .fn_ref = fn_ref,
+ .args = args,
+ });
+ new_inst.val = IrVal{ .KnownType = fn_type.key.data.Normal.return_type };
+ return new_inst;
+ }
+
+ pub fn render(self: *Call, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ const fn_ref = self.params.fn_ref.llvm_value.?;
+
+ const args = try ofile.arena.alloc(llvm.ValueRef, self.params.args.len);
+ for (self.params.args) |arg, i| {
+ args[i] = arg.llvm_value.?;
+ }
+
+ const llvm_cc = llvm.CCallConv;
+ const fn_inline = llvm.FnInline.Auto;
+
+ return llvm.BuildCall(
+ ofile.builder,
+ fn_ref,
+ args.ptr,
+ @intCast(c_uint, args.len),
+ llvm_cc,
+ fn_inline,
+ c"",
+ ) orelse error.OutOfMemory;
+ }
+ };
+
+ pub const Const = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {};
+
+ // Use Builder.buildConst* methods, or, after building a Const instruction,
+ // manually set the ir_val field.
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(self: *const Const) void {
+ self.base.val.KnownValue.dump();
+ }
+
+ pub fn hasSideEffects(self: *const Const) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const Const, ira: *Analyze) !*Inst {
+ const new_inst = try ira.irb.build(Const, self.base.scope, self.base.span, Params{});
+ new_inst.val = IrVal{ .KnownValue = self.base.val.KnownValue.getRef() };
+ return new_inst;
+ }
+
+ pub fn render(self: *Const, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ return self.base.val.KnownValue.getLlvmConst(ofile);
+ }
+ };
+
+ pub const Return = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ return_value: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.NoReturn;
+
+ pub fn dump(self: *const Return) void {
+ std.debug.warn("#{}", self.params.return_value.debug_id);
+ }
+
+ pub fn hasSideEffects(self: *const Return) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const Return, ira: *Analyze) !*Inst {
+ const value = try self.params.return_value.getAsParam();
+ const casted_value = try ira.implicitCast(value, ira.explicit_return_type);
+
+ // TODO detect returning local variable address
+
+ return ira.irb.build(Return, self.base.scope, self.base.span, Params{ .return_value = casted_value });
+ }
+
+ pub fn render(self: *Return, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ const value = self.params.return_value.llvm_value;
+ const return_type = self.params.return_value.getKnownType();
+
+ if (return_type.handleIsPtr()) {
+ @panic("TODO");
+ } else {
+ _ = llvm.BuildRet(ofile.builder, value) orelse return error.OutOfMemory;
+ }
+ return null;
+ }
+ };
+
+ pub const Ref = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ target: *Inst,
+ mut: Type.Pointer.Mut,
+ volatility: Type.Pointer.Vol,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const Ref) void {}
+
+ pub fn hasSideEffects(inst: *const Ref) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const Ref, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+
+ if (ira.getCompTimeValOrNullUndefOk(target)) |val| {
+ return ira.getCompTimeRef(
+ val,
+ Value.Ptr.Mut.CompTimeConst,
+ self.params.mut,
+ self.params.volatility,
+ );
+ }
+
+ const new_inst = try ira.irb.build(Ref, self.base.scope, self.base.span, Params{
+ .target = target,
+ .mut = self.params.mut,
+ .volatility = self.params.volatility,
+ });
+ const elem_type = target.getKnownType();
+ const ptr_type = try await (async Type.Pointer.get(ira.irb.comp, Type.Pointer.Key{
+ .child_type = elem_type,
+ .mut = self.params.mut,
+ .vol = self.params.volatility,
+ .size = Type.Pointer.Size.One,
+ .alignment = Type.Pointer.Align.Abi,
+ }) catch unreachable);
+ // TODO: potentially set the hint that this is a stack pointer. But it might not be - this
+ // could be a ref of a global, for example
+ new_inst.val = IrVal{ .KnownType = &ptr_type.base };
+ // TODO potentially add an alloca entry here
+ return new_inst;
+ }
+ };
+
+ pub const DeclRef = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ decl: *Decl,
+ lval: LVal,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const DeclRef) void {}
+
+ pub fn hasSideEffects(inst: *const DeclRef) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const DeclRef, ira: *Analyze) !*Inst {
+ (await (async ira.irb.comp.resolveDecl(self.params.decl) catch unreachable)) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => return error.SemanticAnalysisFailed,
+ };
+ switch (self.params.decl.id) {
+ Decl.Id.CompTime => unreachable,
+ Decl.Id.Var => return error.Unimplemented,
+ Decl.Id.Fn => {
+ const fn_decl = @fieldParentPtr(Decl.Fn, "base", self.params.decl);
+ const decl_val = switch (fn_decl.value) {
+ Decl.Fn.Val.Unresolved => unreachable,
+ Decl.Fn.Val.Fn => |fn_val| &fn_val.base,
+ Decl.Fn.Val.FnProto => |fn_proto| &fn_proto.base,
+ };
+ switch (self.params.lval) {
+ LVal.None => {
+ return ira.irb.buildConstValue(self.base.scope, self.base.span, decl_val);
+ },
+ LVal.Ptr => return error.Unimplemented,
+ }
+ },
+ }
+ }
+ };
+
+ pub const VarPtr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ var_scope: *Scope.Var,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const VarPtr) void {
+ std.debug.warn("{}", inst.params.var_scope.name);
+ }
+
+ pub fn hasSideEffects(inst: *const VarPtr) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const VarPtr, ira: *Analyze) !*Inst {
+ switch (self.params.var_scope.data) {
+ Scope.Var.Data.Const => @panic("TODO"),
+ Scope.Var.Data.Param => |param| {
+ const new_inst = try ira.irb.build(
+ Inst.VarPtr,
+ self.base.scope,
+ self.base.span,
+ Inst.VarPtr.Params{ .var_scope = self.params.var_scope },
+ );
+ const ptr_type = try await (async Type.Pointer.get(ira.irb.comp, Type.Pointer.Key{
+ .child_type = param.typ,
+ .mut = Type.Pointer.Mut.Const,
+ .vol = Type.Pointer.Vol.Non,
+ .size = Type.Pointer.Size.One,
+ .alignment = Type.Pointer.Align.Abi,
+ }) catch unreachable);
+ new_inst.val = IrVal{ .KnownType = &ptr_type.base };
+ return new_inst;
+ },
+ }
+ }
+
+ pub fn render(self: *VarPtr, ofile: *ObjectFile, fn_val: *Value.Fn) llvm.ValueRef {
+ switch (self.params.var_scope.data) {
+ Scope.Var.Data.Const => unreachable, // turned into Inst.Const in analyze pass
+ Scope.Var.Data.Param => |param| return param.llvm_value,
+ }
+ }
+ };
+
+ pub const LoadPtr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ target: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const LoadPtr) void {}
+
+ pub fn hasSideEffects(inst: *const LoadPtr) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const LoadPtr, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+ const target_type = target.getKnownType();
+ if (target_type.id != Type.Id.Pointer) {
+ try ira.addCompileError(self.base.span, "dereference of non pointer type '{}'", target_type.name);
+ return error.SemanticAnalysisFailed;
+ }
+ const ptr_type = @fieldParentPtr(Type.Pointer, "base", target_type);
+ // if (instr_is_comptime(ptr)) {
+ // if (ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst ||
+ // ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar)
+ // {
+ // ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &ptr->value);
+ // if (pointee->special != ConstValSpecialRuntime) {
+ // IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope,
+ // source_instruction->source_node, child_type);
+ // copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
+ // result->value.type = child_type;
+ // return result;
+ // }
+ // }
+ // }
+ const new_inst = try ira.irb.build(
+ Inst.LoadPtr,
+ self.base.scope,
+ self.base.span,
+ Inst.LoadPtr.Params{ .target = target },
+ );
+ new_inst.val = IrVal{ .KnownType = ptr_type.key.child_type };
+ return new_inst;
+ }
+
+ pub fn render(self: *LoadPtr, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ const child_type = self.base.getKnownType();
+ if (!child_type.hasBits()) {
+ return null;
+ }
+ const ptr = self.params.target.llvm_value.?;
+ const ptr_type = self.params.target.getKnownType().cast(Type.Pointer).?;
+
+ return try codegen.getHandleValue(ofile, ptr, ptr_type);
+
+ //uint32_t unaligned_bit_count = ptr_type->data.pointer.unaligned_bit_count;
+ //if (unaligned_bit_count == 0)
+ // return get_handle_value(g, ptr, child_type, ptr_type);
+
+ //bool big_endian = g->is_big_endian;
+
+ //assert(!handle_is_ptr(child_type));
+ //LLVMValueRef containing_int = gen_load(g, ptr, ptr_type, "");
+
+ //uint32_t bit_offset = ptr_type->data.pointer.bit_offset;
+ //uint32_t host_bit_count = LLVMGetIntTypeWidth(LLVMTypeOf(containing_int));
+ //uint32_t shift_amt = big_endian ? host_bit_count - bit_offset - unaligned_bit_count : bit_offset;
+
+ //LLVMValueRef shift_amt_val = LLVMConstInt(LLVMTypeOf(containing_int), shift_amt, false);
+ //LLVMValueRef shifted_value = LLVMBuildLShr(g->builder, containing_int, shift_amt_val, "");
+
+ //return LLVMBuildTrunc(g->builder, shifted_value, child_type->type_ref, "");
+ }
+ };
+
+ pub const PtrType = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ child_type: *Inst,
+ mut: Type.Pointer.Mut,
+ vol: Type.Pointer.Vol,
+ size: Type.Pointer.Size,
+ alignment: ?*Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const PtrType) void {}
+
+ pub fn hasSideEffects(inst: *const PtrType) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const PtrType, ira: *Analyze) !*Inst {
+ const child_type = try self.params.child_type.getAsConstType(ira);
+ // if (child_type->id == TypeTableEntryIdUnreachable) {
+ // ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
+ // return ira->codegen->builtin_types.entry_invalid;
+ // } else if (child_type->id == TypeTableEntryIdOpaque && instruction->ptr_len == PtrLenUnknown) {
+ // ir_add_error(ira, &instruction->base, buf_sprintf("unknown-length pointer to opaque"));
+ // return ira->codegen->builtin_types.entry_invalid;
+ // }
+ const alignment = if (self.params.alignment) |align_inst| blk: {
+ const amt = try align_inst.getAsConstAlign(ira);
+ break :blk Type.Pointer.Align{ .Override = amt };
+ } else blk: {
+ break :blk Type.Pointer.Align{ .Abi = {} };
+ };
+ const ptr_type = try await (async Type.Pointer.get(ira.irb.comp, Type.Pointer.Key{
+ .child_type = child_type,
+ .mut = self.params.mut,
+ .vol = self.params.vol,
+ .size = self.params.size,
+ .alignment = alignment,
+ }) catch unreachable);
+ ptr_type.base.base.deref(ira.irb.comp);
+
+ return ira.irb.buildConstValue(self.base.scope, self.base.span, &ptr_type.base.base);
+ }
+ };
+
+ pub const DeclVar = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ variable: *Variable,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const DeclVar) void {}
+
+ pub fn hasSideEffects(inst: *const DeclVar) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const DeclVar, ira: *Analyze) !*Inst {
+ return error.Unimplemented; // TODO
+ }
+ };
+
+    /// IR instruction: asserts that a statement's value is void. Emitted after
+    /// each non-noreturn, non-declaration statement in a block so that ignored
+    /// expression values become compile errors.
+    pub const CheckVoidStmt = struct {
+        base: Inst,
+        params: Params,
+
+        const Params = struct {
+            target: *Inst,
+        };
+
+        const ir_val_init = IrVal.Init.Unknown;
+
+        pub fn dump(self: *const CheckVoidStmt) void {
+            std.debug.warn("#{}", self.params.target.debug_id);
+        }
+
+        /// Always true so that the check itself is never eliminated.
+        pub fn hasSideEffects(inst: *const CheckVoidStmt) bool {
+            return true;
+        }
+
+        /// Fails analysis with "expression value is ignored" unless the target's
+        /// known type is void; on success lowers to a comptime void constant.
+        pub fn analyze(self: *const CheckVoidStmt, ira: *Analyze) !*Inst {
+            const target = try self.params.target.getAsParam();
+            if (target.getKnownType().id != Type.Id.Void) {
+                try ira.addCompileError(self.base.span, "expression value is ignored");
+                return error.SemanticAnalysisFailed;
+            }
+            return ira.irb.buildConstVoid(self.base.scope, self.base.span, true);
+        }
+    };
+
+    /// IR instruction: SSA phi node. Selects among incoming_values according to
+    /// which of incoming_blocks control flow arrived from (the two slices are
+    /// parallel, one entry per predecessor).
+    pub const Phi = struct {
+        base: Inst,
+        params: Params,
+
+        const Params = struct {
+            incoming_blocks: []*BasicBlock,
+            incoming_values: []*Inst,
+        };
+
+        const ir_val_init = IrVal.Init.Unknown;
+
+        pub fn dump(inst: *const Phi) void {}
+
+        /// A phi merely selects a value; it has no observable effect of its own.
+        pub fn hasSideEffects(inst: *const Phi) bool {
+            return false;
+        }
+
+        /// Semantic analysis is not implemented yet for this instruction.
+        pub fn analyze(self: *const Phi, ira: *Analyze) !*Inst {
+            return error.Unimplemented; // TODO
+        }
+    };
+
+    /// IR instruction: unconditional branch to dest_block. The is_comptime
+    /// operand records whether the branch is taken at compile time.
+    /// Its value is NoReturn: control never falls through a branch.
+    pub const Br = struct {
+        base: Inst,
+        params: Params,
+
+        const Params = struct {
+            dest_block: *BasicBlock,
+            is_comptime: *Inst,
+        };
+
+        const ir_val_init = IrVal.Init.NoReturn;
+
+        pub fn dump(inst: *const Br) void {}
+
+        /// Control flow is always a side effect.
+        pub fn hasSideEffects(inst: *const Br) bool {
+            return true;
+        }
+
+        /// Semantic analysis is not implemented yet for this instruction.
+        pub fn analyze(self: *const Br, ira: *Analyze) !*Inst {
+            return error.Unimplemented; // TODO
+        }
+    };
+
+    /// IR instruction: conditional branch. Transfers control to then_block or
+    /// else_block depending on condition; is_comptime records whether the
+    /// branch must be resolved at compile time. Value is NoReturn.
+    pub const CondBr = struct {
+        base: Inst,
+        params: Params,
+
+        const Params = struct {
+            condition: *Inst,
+            then_block: *BasicBlock,
+            else_block: *BasicBlock,
+            is_comptime: *Inst,
+        };
+
+        const ir_val_init = IrVal.Init.NoReturn;
+
+        pub fn dump(inst: *const CondBr) void {}
+
+        /// Control flow is always a side effect.
+        pub fn hasSideEffects(inst: *const CondBr) bool {
+            return true;
+        }
+
+        /// Semantic analysis is not implemented yet for this instruction.
+        pub fn analyze(self: *const CondBr, ira: *Analyze) !*Inst {
+            return error.Unimplemented; // TODO
+        }
+    };
+
+    /// IR instruction: records a returned value so the function's inferred
+    /// (implicit) return type can be computed later from all return sites.
+    pub const AddImplicitReturnType = struct {
+        base: Inst,
+        params: Params,
+
+        pub const Params = struct {
+            target: *Inst,
+        };
+
+        const ir_val_init = IrVal.Init.Unknown;
+
+        pub fn dump(inst: *const AddImplicitReturnType) void {
+            std.debug.warn("#{}", inst.params.target.debug_id);
+        }
+
+        /// True: appending to the inferred-return-type list must always happen.
+        pub fn hasSideEffects(inst: *const AddImplicitReturnType) bool {
+            return true;
+        }
+
+        /// Appends the target to the analyzer's implicit-return-type list and
+        /// lowers to a comptime void constant.
+        pub fn analyze(self: *const AddImplicitReturnType, ira: *Analyze) !*Inst {
+            const target = try self.params.target.getAsParam();
+            try ira.src_implicit_return_type_list.append(target);
+            return ira.irb.buildConstVoid(self.base.scope, self.base.span, true);
+        }
+    };
+
+    /// IR instruction: tests whether the target holds an error (the `if (x) |v| else |e|`
+    /// / `try` machinery). Result is a bool.
+    pub const TestErr = struct {
+        base: Inst,
+        params: Params,
+
+        pub const Params = struct {
+            target: *Inst,
+        };
+
+        const ir_val_init = IrVal.Init.Unknown;
+
+        pub fn dump(inst: *const TestErr) void {
+            std.debug.warn("#{}", inst.params.target.debug_id);
+        }
+
+        /// A pure test; produces a value with no observable effect.
+        pub fn hasSideEffects(inst: *const TestErr) bool {
+            return false;
+        }
+
+        /// Resolves by the target's type:
+        /// - ErrorUnion: not implemented yet (the commented stage1 C++ below is the
+        ///   logic still to be ported: comptime fold, inferred-error-set resolution,
+        ///   and the empty-error-set fast path).
+        /// - ErrorSet: the value is always an error -> comptime `true`.
+        /// - anything else: cannot be an error -> comptime `false`.
+        pub fn analyze(self: *const TestErr, ira: *Analyze) !*Inst {
+            const target = try self.params.target.getAsParam();
+            const target_type = target.getKnownType();
+            switch (target_type.id) {
+                Type.Id.ErrorUnion => {
+                    return error.Unimplemented;
+                    // if (instr_is_comptime(value)) {
+                    //     ConstExprValue *err_union_val = ir_resolve_const(ira, value, UndefBad);
+                    //     if (!err_union_val)
+                    //         return ira->codegen->builtin_types.entry_invalid;
+
+                    //     if (err_union_val->special != ConstValSpecialRuntime) {
+                    //         ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+                    //         out_val->data.x_bool = (err_union_val->data.x_err_union.err != nullptr);
+                    //         return ira->codegen->builtin_types.entry_bool;
+                    //     }
+                    // }
+
+                    // TypeTableEntry *err_set_type = type_entry->data.error_union.err_set_type;
+                    // if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.source_node)) {
+                    //     return ira->codegen->builtin_types.entry_invalid;
+                    // }
+                    // if (!type_is_global_error_set(err_set_type) &&
+                    //     err_set_type->data.error_set.err_count == 0)
+                    // {
+                    //     assert(err_set_type->data.error_set.infer_fn == nullptr);
+                    //     ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+                    //     out_val->data.x_bool = false;
+                    //     return ira->codegen->builtin_types.entry_bool;
+                    // }
+
+                    // ir_build_test_err_from(&ira->new_irb, &instruction->base, value);
+                    // return ira->codegen->builtin_types.entry_bool;
+                },
+                Type.Id.ErrorSet => {
+                    return ira.irb.buildConstBool(self.base.scope, self.base.span, true);
+                },
+                else => {
+                    return ira.irb.buildConstBool(self.base.scope, self.base.span, false);
+                },
+            }
+        }
+    };
+
+    /// IR instruction: tests whether the target's value is known at compile
+    /// time. Lowers to a comptime bool constant.
+    pub const TestCompTime = struct {
+        base: Inst,
+        params: Params,
+
+        pub const Params = struct {
+            target: *Inst,
+        };
+
+        const ir_val_init = IrVal.Init.Unknown;
+
+        pub fn dump(inst: *const TestCompTime) void {
+            std.debug.warn("#{}", inst.params.target.debug_id);
+        }
+
+        /// A pure query; no observable effect.
+        pub fn hasSideEffects(inst: *const TestCompTime) bool {
+            return false;
+        }
+
+        /// Folds directly to a bool constant from target.isCompTime().
+        pub fn analyze(self: *const TestCompTime, ira: *Analyze) !*Inst {
+            const target = try self.params.target.getAsParam();
+            return ira.irb.buildConstBool(self.base.scope, self.base.span, target.isCompTime());
+        }
+    };
+
+    /// IR instruction: records the current address for error-return tracing
+    /// (emitted before returning an error when have_err_ret_tracing is on).
+    pub const SaveErrRetAddr = struct {
+        base: Inst,
+        params: Params,
+
+        const Params = struct {};
+
+        const ir_val_init = IrVal.Init.Unknown;
+
+        pub fn dump(inst: *const SaveErrRetAddr) void {}
+
+        /// True: the trace write must not be eliminated.
+        pub fn hasSideEffects(inst: *const SaveErrRetAddr) bool {
+            return true;
+        }
+
+        /// Passes through unchanged: re-emits the same instruction into the
+        /// analyzed IR.
+        pub fn analyze(self: *const SaveErrRetAddr, ira: *Analyze) !*Inst {
+            return ira.irb.build(Inst.SaveErrRetAddr, self.base.scope, self.base.span, Params{});
+        }
+    };
+};
+
+/// A local variable. Currently only tracks the scope introduced by its
+/// declaration (the scope subsequent statements are generated in); presumably
+/// more fields (name, type, storage) will be added as DeclVar analysis is
+/// implemented — TODO confirm.
+pub const Variable = struct {
+    child_scope: *Scope,
+};
+
+/// A basic block of IR instructions. Allocated from the Code arena, so there
+/// is no deinit; ref_count tracks incoming references during IR building.
+pub const BasicBlock = struct {
+    ref_count: usize,
+    name_hint: [*]const u8, // must be a C string literal
+    debug_id: usize,
+    scope: *Scope,
+    instruction_list: std.ArrayList(*Inst),
+    ref_instruction: ?*Inst,
+
+    /// for codegen
+    llvm_block: llvm.BasicBlockRef,
+    llvm_exit_block: llvm.BasicBlockRef,
+
+    /// the basic block that is derived from this one in analysis
+    child: ?*BasicBlock,
+
+    /// the basic block that this one derives from in analysis
+    parent: ?*BasicBlock,
+
+    /// Increments the reference count. The builder parameter is currently
+    /// unused (kept, presumably, for symmetry with Inst.ref — TODO confirm).
+    pub fn ref(self: *BasicBlock, builder: *Builder) void {
+        self.ref_count += 1;
+    }
+
+    /// Records that `self` is the analyzed counterpart of `parent`.
+    /// Asserts neither side is already linked.
+    pub fn linkToParent(self: *BasicBlock, parent: *BasicBlock) void {
+        assert(self.parent == null);
+        assert(parent.child == null);
+        self.parent = parent;
+        parent.child = self;
+    }
+};
+
+/// Stuff that survives longer than Builder
+/// Owns the arena that all basic blocks and instructions are allocated from;
+/// destroying the Code frees the entire IR at once.
+pub const Code = struct {
+    basic_block_list: std.ArrayList(*BasicBlock),
+    arena: std.heap.ArenaAllocator,
+    return_type: ?*Type,
+
+    /// allocator is comp.gpa()
+    pub fn destroy(self: *Code, allocator: *Allocator) void {
+        self.arena.deinit();
+        allocator.destroy(self);
+    }
+
+    /// Debug-prints every basic block and its instructions to stderr.
+    pub fn dump(self: *Code) void {
+        var bb_i: usize = 0;
+        for (self.basic_block_list.toSliceConst()) |bb| {
+            std.debug.warn("{s}_{}:\n", bb.name_hint, bb.debug_id);
+            for (bb.instruction_list.toSliceConst()) |instr| {
+                std.debug.warn("  ");
+                instr.dump();
+                std.debug.warn("\n");
+            }
+        }
+    }
+
+    /// returns a ref-incremented value, or adds a compile error
+    /// Scans the first basic block: on finding a Return whose value is
+    /// comptime-known, returns that value with an added reference. A Return of
+    /// a runtime value, or any side-effecting instruction before the Return,
+    /// produces "unable to evaluate constant expression". Asserts (via
+    /// unreachable) that the block does end in a Return.
+    pub fn getCompTimeResult(self: *Code, comp: *Compilation) !*Value {
+        const bb = self.basic_block_list.at(0);
+        for (bb.instruction_list.toSliceConst()) |inst| {
+            if (inst.cast(Inst.Return)) |ret_inst| {
+                const ret_value = ret_inst.params.return_value;
+                if (ret_value.isCompTime()) {
+                    return ret_value.val.KnownValue.getRef();
+                }
+                try comp.addCompileError(
+                    ret_value.scope.findRoot(),
+                    ret_value.span,
+                    "unable to evaluate constant expression",
+                );
+                return error.SemanticAnalysisFailed;
+            } else if (inst.hasSideEffects()) {
+                try comp.addCompileError(
+                    inst.scope.findRoot(),
+                    inst.span,
+                    "unable to evaluate constant expression",
+                );
+                return error.SemanticAnalysisFailed;
+            }
+        }
+        unreachable;
+    }
+};
+
+pub const Builder = struct {
+ comp: *Compilation,
+ code: *Code,
+ current_basic_block: *BasicBlock,
+ next_debug_id: usize,
+ root_scope: *Scope.Root,
+ is_comptime: bool,
+ is_async: bool,
+ begin_scope: ?*Scope,
+
+ pub const Error = Analyze.Error;
+
+    /// Allocates a fresh Code (with its own arena) and returns a Builder over
+    /// it. On error the Code is destroyed; on success ownership of the Code
+    /// passes to the Builder (release it via abort() or finish()).
+    pub fn init(comp: *Compilation, root_scope: *Scope.Root, begin_scope: ?*Scope) !Builder {
+        const code = try comp.gpa().create(Code{
+            .basic_block_list = undefined,
+            .arena = std.heap.ArenaAllocator.init(comp.gpa()),
+            .return_type = null,
+        });
+        // Initialized after create() because the list must allocate from the
+        // arena that lives inside the freshly created Code.
+        code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
+        errdefer code.destroy(comp.gpa());
+
+        return Builder{
+            .comp = comp,
+            .root_scope = root_scope,
+            .current_basic_block = undefined,
+            .code = code,
+            .next_debug_id = 0,
+            .is_comptime = false,
+            .is_async = false,
+            .begin_scope = begin_scope,
+        };
+    }
+
+    /// Discards the Builder's work: destroys the owned Code and its arena.
+    pub fn abort(self: *Builder) void {
+        self.code.destroy(self.comp.gpa());
+    }
+
+    /// Call code.destroy() when done
+    /// Transfers ownership of the built Code to the caller.
+    pub fn finish(self: *Builder) *Code {
+        return self.code;
+    }
+
+    /// No need to clean up resources thanks to the arena allocator.
+    /// Creates an (unappended) basic block with a unique debug id.
+    /// name_hint must be a C string literal (see BasicBlock.name_hint).
+    pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: [*]const u8) !*BasicBlock {
+        const basic_block = try self.arena().create(BasicBlock{
+            .ref_count = 0,
+            .name_hint = name_hint,
+            .debug_id = self.next_debug_id,
+            .scope = scope,
+            .instruction_list = std.ArrayList(*Inst).init(self.arena()),
+            .child = null,
+            .parent = null,
+            .ref_instruction = null,
+            .llvm_block = undefined,
+            .llvm_exit_block = undefined,
+        });
+        self.next_debug_id += 1;
+        return basic_block;
+    }
+
+    /// Appends the block to the Code's block list and makes it the current
+    /// insertion point for subsequently built instructions.
+    pub fn setCursorAtEndAndAppendBlock(self: *Builder, basic_block: *BasicBlock) !void {
+        try self.code.basic_block_list.append(basic_block);
+        self.setCursorAtEnd(basic_block);
+    }
+
+    /// Makes basic_block the current insertion point (does not append it).
+    pub fn setCursorAtEnd(self: *Builder, basic_block: *BasicBlock) void {
+        self.current_basic_block = basic_block;
+    }
+
+    /// Central AST -> IR dispatch: lowers one AST node in the given scope,
+    /// applying lval (value vs pointer) wrapping where applicable. Top-level
+    /// node kinds (Root/Use/TestDecl) never reach here; most kinds are still
+    /// error.Unimplemented at this stage of the port.
+    pub async fn genNode(irb: *Builder, node: *ast.Node, scope: *Scope, lval: LVal) Error!*Inst {
+        switch (node.id) {
+            ast.Node.Id.Root => unreachable,
+            ast.Node.Id.Use => unreachable,
+            ast.Node.Id.TestDecl => unreachable,
+            ast.Node.Id.VarDecl => return error.Unimplemented,
+            ast.Node.Id.Defer => return error.Unimplemented,
+            ast.Node.Id.InfixOp => return error.Unimplemented,
+            ast.Node.Id.PrefixOp => {
+                const prefix_op = @fieldParentPtr(ast.Node.PrefixOp, "base", node);
+                switch (prefix_op.op) {
+                    ast.Node.PrefixOp.Op.AddressOf => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.ArrayType => |n| return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.Await => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.BitNot => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.BoolNot => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.Cancel => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.OptionalType => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.Negation => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.NegationWrap => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.Resume => return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.PtrType => |ptr_info| {
+                        const inst = try await (async irb.genPtrType(prefix_op, ptr_info, scope) catch unreachable);
+                        return irb.lvalWrap(scope, inst, lval);
+                    },
+                    ast.Node.PrefixOp.Op.SliceType => |ptr_info| return error.Unimplemented,
+                    ast.Node.PrefixOp.Op.Try => return error.Unimplemented,
+                }
+            },
+            ast.Node.Id.SuffixOp => {
+                const suffix_op = @fieldParentPtr(ast.Node.SuffixOp, "base", node);
+                switch (suffix_op.op) {
+                    @TagType(ast.Node.SuffixOp.Op).Call => |*call| {
+                        const inst = try await (async irb.genCall(suffix_op, call, scope) catch unreachable);
+                        return irb.lvalWrap(scope, inst, lval);
+                    },
+                    @TagType(ast.Node.SuffixOp.Op).ArrayAccess => |n| return error.Unimplemented,
+                    @TagType(ast.Node.SuffixOp.Op).Slice => |slice| return error.Unimplemented,
+                    @TagType(ast.Node.SuffixOp.Op).ArrayInitializer => |init_list| return error.Unimplemented,
+                    @TagType(ast.Node.SuffixOp.Op).StructInitializer => |init_list| return error.Unimplemented,
+                    @TagType(ast.Node.SuffixOp.Op).Deref => return error.Unimplemented,
+                    @TagType(ast.Node.SuffixOp.Op).UnwrapOptional => return error.Unimplemented,
+                }
+            },
+            ast.Node.Id.Switch => return error.Unimplemented,
+            ast.Node.Id.While => return error.Unimplemented,
+            ast.Node.Id.For => return error.Unimplemented,
+            ast.Node.Id.If => return error.Unimplemented,
+            ast.Node.Id.ControlFlowExpression => {
+                const control_flow_expr = @fieldParentPtr(ast.Node.ControlFlowExpression, "base", node);
+                return await (async irb.genControlFlowExpr(control_flow_expr, scope, lval) catch unreachable);
+            },
+            ast.Node.Id.Suspend => return error.Unimplemented,
+            ast.Node.Id.VarType => return error.Unimplemented,
+            ast.Node.Id.ErrorType => return error.Unimplemented,
+            ast.Node.Id.FnProto => return error.Unimplemented,
+            ast.Node.Id.PromiseType => return error.Unimplemented,
+            ast.Node.Id.IntegerLiteral => {
+                const int_lit = @fieldParentPtr(ast.Node.IntegerLiteral, "base", node);
+                return irb.lvalWrap(scope, try irb.genIntLit(int_lit, scope), lval);
+            },
+            ast.Node.Id.FloatLiteral => return error.Unimplemented,
+            ast.Node.Id.StringLiteral => {
+                const str_lit = @fieldParentPtr(ast.Node.StringLiteral, "base", node);
+                const inst = try await (async irb.genStrLit(str_lit, scope) catch unreachable);
+                return irb.lvalWrap(scope, inst, lval);
+            },
+            ast.Node.Id.MultilineStringLiteral => return error.Unimplemented,
+            ast.Node.Id.CharLiteral => return error.Unimplemented,
+            ast.Node.Id.BoolLiteral => return error.Unimplemented,
+            ast.Node.Id.NullLiteral => return error.Unimplemented,
+            ast.Node.Id.UndefinedLiteral => return error.Unimplemented,
+            ast.Node.Id.ThisLiteral => return error.Unimplemented,
+            ast.Node.Id.Unreachable => return error.Unimplemented,
+            ast.Node.Id.Identifier => {
+                const identifier = @fieldParentPtr(ast.Node.Identifier, "base", node);
+                return await (async irb.genIdentifier(identifier, scope, lval) catch unreachable);
+            },
+            ast.Node.Id.GroupedExpression => {
+                const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", node);
+                return await (async irb.genNode(grouped_expr.expr, scope, lval) catch unreachable);
+            },
+            ast.Node.Id.BuiltinCall => return error.Unimplemented,
+            ast.Node.Id.ErrorSetDecl => return error.Unimplemented,
+            ast.Node.Id.ContainerDecl => return error.Unimplemented,
+            ast.Node.Id.Asm => return error.Unimplemented,
+            ast.Node.Id.Comptime => return error.Unimplemented,
+            ast.Node.Id.Block => {
+                const block = @fieldParentPtr(ast.Node.Block, "base", node);
+                const inst = try await (async irb.genBlock(block, scope) catch unreachable);
+                return irb.lvalWrap(scope, inst, lval);
+            },
+            ast.Node.Id.DocComment => return error.Unimplemented,
+            ast.Node.Id.SwitchCase => return error.Unimplemented,
+            ast.Node.Id.SwitchElse => return error.Unimplemented,
+            ast.Node.Id.Else => return error.Unimplemented,
+            ast.Node.Id.Payload => return error.Unimplemented,
+            ast.Node.Id.PointerPayload => return error.Unimplemented,
+            ast.Node.Id.PointerIndexPayload => return error.Unimplemented,
+            ast.Node.Id.StructField => return error.Unimplemented,
+            ast.Node.Id.UnionTag => return error.Unimplemented,
+            ast.Node.Id.EnumTag => return error.Unimplemented,
+            ast.Node.Id.ErrorTag => return error.Unimplemented,
+            ast.Node.Id.AsmInput => return error.Unimplemented,
+            ast.Node.Id.AsmOutput => return error.Unimplemented,
+            ast.Node.Id.AsyncAttribute => return error.Unimplemented,
+            ast.Node.Id.ParamDecl => return error.Unimplemented,
+            ast.Node.Id.FieldInitializer => return error.Unimplemented,
+        }
+    }
+
+    /// Lowers a call expression: generates the callee, then each argument
+    /// (left to right), and emits a Call instruction spanning the call's
+    /// closing token. Async calls and allocators are not yet ported (see the
+    /// commented stage1 C++ below).
+    async fn genCall(irb: *Builder, suffix_op: *ast.Node.SuffixOp, call: *ast.Node.SuffixOp.Op.Call, scope: *Scope) !*Inst {
+        const fn_ref = try await (async irb.genNode(suffix_op.lhs, scope, LVal.None) catch unreachable);
+
+        const args = try irb.arena().alloc(*Inst, call.params.len);
+        var it = call.params.iterator(0);
+        var i: usize = 0;
+        while (it.next()) |arg_node_ptr| : (i += 1) {
+            args[i] = try await (async irb.genNode(arg_node_ptr.*, scope, LVal.None) catch unreachable);
+        }
+
+        //bool is_async = node->data.fn_call_expr.is_async;
+        //IrInstruction *async_allocator = nullptr;
+        //if (is_async) {
+        //    if (node->data.fn_call_expr.async_allocator) {
+        //        async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope);
+        //        if (async_allocator == irb->codegen->invalid_instruction)
+        //            return async_allocator;
+        //    }
+        //}
+
+        return irb.build(Inst.Call, scope, Span.token(suffix_op.rtoken), Inst.Call.Params{
+            .fn_ref = fn_ref,
+            .args = args,
+        });
+        //IrInstruction *fn_call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator, nullptr);
+        //return ir_lval_wrap(irb, scope, fn_call, lval);
+    }
+
+    /// Lowers a pointer-type expression (`*T` etc.) into a PtrType
+    /// instruction. Only the child type is generated so far; mut/vol/size are
+    /// hard-coded (Mut/Non-volatile/Many) and alignment is always null —
+    /// ptr_info is not consulted yet ("TODO port more logic" below; the
+    /// commented stage1 C++ shows the alignment and bit-offset handling still
+    /// to be ported).
+    async fn genPtrType(
+        irb: *Builder,
+        prefix_op: *ast.Node.PrefixOp,
+        ptr_info: ast.Node.PrefixOp.PtrInfo,
+        scope: *Scope,
+    ) !*Inst {
+        // TODO port more logic
+
+        //assert(node->type == NodeTypePointerType);
+        //PtrLen ptr_len = (node->data.pointer_type.star_token->id == TokenIdStar ||
+        //        node->data.pointer_type.star_token->id == TokenIdStarStar) ? PtrLenSingle : PtrLenUnknown;
+        //bool is_const = node->data.pointer_type.is_const;
+        //bool is_volatile = node->data.pointer_type.is_volatile;
+        //AstNode *expr_node = node->data.pointer_type.op_expr;
+        //AstNode *align_expr = node->data.pointer_type.align_expr;
+
+        //IrInstruction *align_value;
+        //if (align_expr != nullptr) {
+        //    align_value = ir_gen_node(irb, align_expr, scope);
+        //    if (align_value == irb->codegen->invalid_instruction)
+        //        return align_value;
+        //} else {
+        //    align_value = nullptr;
+        //}
+        const child_type = try await (async irb.genNode(prefix_op.rhs, scope, LVal.None) catch unreachable);
+
+        //uint32_t bit_offset_start = 0;
+        //if (node->data.pointer_type.bit_offset_start != nullptr) {
+        //    if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
+        //        Buf *val_buf = buf_alloc();
+        //        bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
+        //        exec_add_error_node(irb->codegen, irb->exec, node,
+        //                buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
+        //        return irb->codegen->invalid_instruction;
+        //    }
+        //    bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start);
+        //}
+
+        //uint32_t bit_offset_end = 0;
+        //if (node->data.pointer_type.bit_offset_end != nullptr) {
+        //    if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) {
+        //        Buf *val_buf = buf_alloc();
+        //        bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10);
+        //        exec_add_error_node(irb->codegen, irb->exec, node,
+        //                buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
+        //        return irb->codegen->invalid_instruction;
+        //    }
+        //    bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end);
+        //}
+
+        //if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= bit_offset_end) {
+        //    exec_add_error_node(irb->codegen, irb->exec, node,
+        //            buf_sprintf("bit offset start must be less than bit offset end"));
+        //    return irb->codegen->invalid_instruction;
+        //}
+
+        return irb.build(Inst.PtrType, scope, Span.node(&prefix_op.base), Inst.PtrType.Params{
+            .child_type = child_type,
+            .mut = Type.Pointer.Mut.Mut,
+            .vol = Type.Pointer.Vol.Non,
+            .size = Type.Pointer.Size.Many,
+            .alignment = null,
+        });
+    }
+
+    /// Whether code generated in target_scope executes at compile time:
+    /// true if the whole builder is comptime, otherwise determined by the
+    /// nearest enclosing CompTime (true) or FnDef (false) scope. Decls/Root
+    /// cannot be reached from an expression scope, hence unreachable.
+    fn isCompTime(irb: *Builder, target_scope: *Scope) bool {
+        if (irb.is_comptime)
+            return true;
+
+        var scope = target_scope;
+        while (true) {
+            switch (scope.id) {
+                Scope.Id.CompTime => return true,
+                Scope.Id.FnDef => return false,
+                Scope.Id.Decls => unreachable,
+                Scope.Id.Root => unreachable,
+                Scope.Id.Block,
+                Scope.Id.Defer,
+                Scope.Id.DeferExpr,
+                Scope.Id.Var,
+                => scope = scope.parent.?,
+            }
+        }
+    }
+
+    /// Lowers an integer literal to a comptime_int constant. Detects the base
+    /// from a `0b`/`0o`/`0x` prefix, otherwise decimal; the `else =>
+    /// unreachable` assumes the tokenizer only produces those prefixes for
+    /// leading-zero tokens of length >= 3 — TODO confirm against the tokenizer.
+    /// Digit-validity errors from createFromString are likewise impossible for
+    /// a well-formed token.
+    pub fn genIntLit(irb: *Builder, int_lit: *ast.Node.IntegerLiteral, scope: *Scope) !*Inst {
+        const int_token = irb.root_scope.tree.tokenSlice(int_lit.token);
+
+        var base: u8 = undefined;
+        var rest: []const u8 = undefined;
+        if (int_token.len >= 3 and int_token[0] == '0') {
+            base = switch (int_token[1]) {
+                'b' => u8(2),
+                'o' => u8(8),
+                'x' => u8(16),
+                else => unreachable,
+            };
+            rest = int_token[2..];
+        } else {
+            base = 10;
+            rest = int_token;
+        }
+
+        const comptime_int_type = Type.ComptimeInt.get(irb.comp);
+        defer comptime_int_type.base.base.deref(irb.comp);
+
+        const int_val = Value.Int.createFromString(
+            irb.comp,
+            &comptime_int_type.base,
+            base,
+            rest,
+        ) catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
+            error.InvalidBase => unreachable,
+            error.InvalidCharForDigit => unreachable,
+            error.DigitTooLargeForBase => unreachable,
+        };
+        errdefer int_val.base.deref(irb.comp);
+
+        // The constant instruction takes over the reference held on int_val.
+        const inst = try irb.build(Inst.Const, scope, Span.token(int_lit.token), Inst.Const.Params{});
+        inst.val = IrVal{ .KnownValue = &int_val.base };
+        return inst;
+    }
+
+    /// Lowers a string literal. Parses escapes with std.zig.parseStringLiteral
+    /// (reporting a compile error on invalid escapes). A `c"..."` literal gets
+    /// a trailing NUL appended and lowers to a pointer to the first element of
+    /// the array value; a plain literal lowers to the array value itself.
+    /// Ownership of buf transfers to the created array value (buf_cleaned
+    /// guards the error path before that handoff).
+    pub async fn genStrLit(irb: *Builder, str_lit: *ast.Node.StringLiteral, scope: *Scope) !*Inst {
+        const str_token = irb.root_scope.tree.tokenSlice(str_lit.token);
+        const src_span = Span.token(str_lit.token);
+
+        var bad_index: usize = undefined;
+        var buf = std.zig.parseStringLiteral(irb.comp.gpa(), str_token, &bad_index) catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
+            error.InvalidCharacter => {
+                try irb.comp.addCompileError(
+                    irb.root_scope,
+                    src_span,
+                    "invalid character in string literal: '{c}'",
+                    str_token[bad_index],
+                );
+                return error.SemanticAnalysisFailed;
+            },
+        };
+        var buf_cleaned = false;
+        errdefer if (!buf_cleaned) irb.comp.gpa().free(buf);
+
+        if (str_token[0] == 'c') {
+            // first we add a null
+            buf = try irb.comp.gpa().realloc(u8, buf, buf.len + 1);
+            buf[buf.len - 1] = 0;
+
+            // next make an array value
+            const array_val = try await (async Value.Array.createOwnedBuffer(irb.comp, buf) catch unreachable);
+            buf_cleaned = true;
+            defer array_val.base.deref(irb.comp);
+
+            // then make a pointer value pointing at the first element
+            const ptr_val = try await (async Value.Ptr.createArrayElemPtr(
+                irb.comp,
+                array_val,
+                Type.Pointer.Mut.Const,
+                Type.Pointer.Size.Many,
+                0,
+            ) catch unreachable);
+            defer ptr_val.base.deref(irb.comp);
+
+            return irb.buildConstValue(scope, src_span, &ptr_val.base);
+        } else {
+            const array_val = try await (async Value.Array.createOwnedBuffer(irb.comp, buf) catch unreachable);
+            buf_cleaned = true;
+            defer array_val.base.deref(irb.comp);
+
+            return irb.buildConstValue(scope, src_span, &array_val.base);
+        }
+    }
+
+    /// Lowers an AST block into IR. Opens a new Scope.Block; `defer`/
+    /// `errdefer` statements and variable declarations each start a new child
+    /// scope covering the statements after them. A labeled block additionally
+    /// gets an end basic block and a Phi over the values that branch to it;
+    /// an unlabeled block whose last statement is noreturn yields that
+    /// statement's value, otherwise the block yields void.
+    pub async fn genBlock(irb: *Builder, block: *ast.Node.Block, parent_scope: *Scope) !*Inst {
+        const block_scope = try Scope.Block.create(irb.comp, parent_scope);
+
+        const outer_block_scope = &block_scope.base;
+        var child_scope = outer_block_scope;
+
+        // The first block generated inside a fn definition becomes the
+        // function's block scope.
+        if (parent_scope.findFnDef()) |fndef_scope| {
+            if (fndef_scope.fn_val.?.block_scope == null) {
+                fndef_scope.fn_val.?.block_scope = block_scope;
+            }
+        }
+
+        if (block.statements.len == 0) {
+            // {}
+            return irb.buildConstVoid(child_scope, Span.token(block.lbrace), false);
+        }
+
+        if (block.label) |label| {
+            block_scope.incoming_values = std.ArrayList(*Inst).init(irb.arena());
+            block_scope.incoming_blocks = std.ArrayList(*BasicBlock).init(irb.arena());
+            block_scope.end_block = try irb.createBasicBlock(parent_scope, c"BlockEnd");
+            block_scope.is_comptime = try irb.buildConstBool(
+                parent_scope,
+                Span.token(block.lbrace),
+                irb.isCompTime(parent_scope),
+            );
+        }
+
+        var is_continuation_unreachable = false;
+        var noreturn_return_value: ?*Inst = null;
+
+        var stmt_it = block.statements.iterator(0);
+        while (stmt_it.next()) |statement_node_ptr| {
+            const statement_node = statement_node_ptr.*;
+
+            if (statement_node.cast(ast.Node.Defer)) |defer_node| {
+                // defer starts a new scope
+                const defer_token = irb.root_scope.tree.tokens.at(defer_node.defer_token);
+                const kind = switch (defer_token.id) {
+                    Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit,
+                    Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
+                    else => unreachable,
+                };
+                // NOTE(review): both scopes are parented to parent_scope, not
+                // the current child_scope; with multiple defers in one block,
+                // walking parents from a later statement (genDefersForBlock)
+                // would not see the earlier defer. stage1 chains off the
+                // current scope — confirm and fix if so.
+                const defer_expr_scope = try Scope.DeferExpr.create(irb.comp, parent_scope, defer_node.expr);
+                const defer_child_scope = try Scope.Defer.create(irb.comp, parent_scope, kind, defer_expr_scope);
+                child_scope = &defer_child_scope.base;
+                continue;
+            }
+            const statement_value = try await (async irb.genNode(statement_node, child_scope, LVal.None) catch unreachable);
+
+            is_continuation_unreachable = statement_value.isNoReturn();
+            if (is_continuation_unreachable) {
+                // keep the last noreturn statement value around in case we need to return it
+                noreturn_return_value = statement_value;
+            }
+
+            if (statement_value.cast(Inst.DeclVar)) |decl_var| {
+                // variable declarations start a new scope
+                child_scope = decl_var.params.variable.child_scope;
+            } else if (!is_continuation_unreachable) {
+                // this statement's value must be void
+                // Fix: propagate build() failure (e.g. error.OutOfMemory) with
+                // `try` instead of silently discarding the error union — every
+                // other build call site in this file does the same.
+                _ = try irb.build(
+                    Inst.CheckVoidStmt,
+                    child_scope,
+                    Span{
+                        .first = statement_node.firstToken(),
+                        .last = statement_node.lastToken(),
+                    },
+                    Inst.CheckVoidStmt.Params{ .target = statement_value },
+                );
+            }
+        }
+
+        if (is_continuation_unreachable) {
+            assert(noreturn_return_value != null);
+            if (block.label == null or block_scope.incoming_blocks.len == 0) {
+                return noreturn_return_value.?;
+            }
+
+            try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
+            return irb.build(Inst.Phi, parent_scope, Span.token(block.rbrace), Inst.Phi.Params{
+                .incoming_blocks = block_scope.incoming_blocks.toOwnedSlice(),
+                .incoming_values = block_scope.incoming_values.toOwnedSlice(),
+            });
+        }
+
+        if (block.label) |label| {
+            // Falling off the end of a labeled block yields void to the phi.
+            try block_scope.incoming_blocks.append(irb.current_basic_block);
+            try block_scope.incoming_values.append(
+                try irb.buildConstVoid(parent_scope, Span.token(block.rbrace), true),
+            );
+            _ = try await (async irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+
+            _ = try irb.buildGen(Inst.Br, parent_scope, Span.token(block.rbrace), Inst.Br.Params{
+                .dest_block = block_scope.end_block,
+                .is_comptime = block_scope.is_comptime,
+            });
+
+            try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
+
+            return irb.build(Inst.Phi, parent_scope, Span.token(block.rbrace), Inst.Phi.Params{
+                .incoming_blocks = block_scope.incoming_blocks.toOwnedSlice(),
+                .incoming_values = block_scope.incoming_values.toOwnedSlice(),
+            });
+        }
+
+        _ = try await (async irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+        return irb.buildConstVoid(child_scope, Span.token(block.rbrace), true);
+    }
+
+    /// Lowers break/continue/return. Only Return is implemented: it rejects
+    /// returns outside a function or inside a defer expression, generates the
+    /// return value (void if absent), runs the applicable defers, and — when
+    /// errdefers or error-return tracing are in play — branches on TestErr so
+    /// that error-exit defers and SaveErrRetAddr run only on the error path
+    /// before the shared return block.
+    pub async fn genControlFlowExpr(
+        irb: *Builder,
+        control_flow_expr: *ast.Node.ControlFlowExpression,
+        scope: *Scope,
+        lval: LVal,
+    ) !*Inst {
+        switch (control_flow_expr.kind) {
+            ast.Node.ControlFlowExpression.Kind.Break => |arg| return error.Unimplemented,
+            ast.Node.ControlFlowExpression.Kind.Continue => |arg| return error.Unimplemented,
+            ast.Node.ControlFlowExpression.Kind.Return => {
+                const src_span = Span.token(control_flow_expr.ltoken);
+                if (scope.findFnDef() == null) {
+                    try irb.comp.addCompileError(
+                        irb.root_scope,
+                        src_span,
+                        "return expression outside function definition",
+                    );
+                    return error.SemanticAnalysisFailed;
+                }
+
+                if (scope.findDeferExpr()) |scope_defer_expr| {
+                    // Report "cannot return from defer" only once per defer expr.
+                    if (!scope_defer_expr.reported_err) {
+                        try irb.comp.addCompileError(
+                            irb.root_scope,
+                            src_span,
+                            "cannot return from defer expression",
+                        );
+                        scope_defer_expr.reported_err = true;
+                    }
+                    return error.SemanticAnalysisFailed;
+                }
+
+                const outer_scope = irb.begin_scope.?;
+                const return_value = if (control_flow_expr.rhs) |rhs| blk: {
+                    break :blk try await (async irb.genNode(rhs, scope, LVal.None) catch unreachable);
+                } else blk: {
+                    break :blk try irb.buildConstVoid(scope, src_span, true);
+                };
+
+                const defer_counts = irb.countDefers(scope, outer_scope);
+                const have_err_defers = defer_counts.error_exit != 0;
+                if (have_err_defers or irb.comp.have_err_ret_tracing) {
+                    const err_block = try irb.createBasicBlock(scope, c"ErrRetErr");
+                    const ok_block = try irb.createBasicBlock(scope, c"ErrRetOk");
+                    if (!have_err_defers) {
+                        // Only tracing differs by path; scope-exit defers can
+                        // run once up front.
+                        _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+                    }
+
+                    const is_err = try irb.build(
+                        Inst.TestErr,
+                        scope,
+                        src_span,
+                        Inst.TestErr.Params{ .target = return_value },
+                    );
+
+                    const err_is_comptime = try irb.buildTestCompTime(scope, src_span, is_err);
+
+                    _ = try irb.buildGen(Inst.CondBr, scope, src_span, Inst.CondBr.Params{
+                        .condition = is_err,
+                        .then_block = err_block,
+                        .else_block = ok_block,
+                        .is_comptime = err_is_comptime,
+                    });
+
+                    const ret_stmt_block = try irb.createBasicBlock(scope, c"RetStmt");
+
+                    // Error path: run error-exit (and scope-exit) defers, then
+                    // record the error return address if tracing.
+                    try irb.setCursorAtEndAndAppendBlock(err_block);
+                    if (have_err_defers) {
+                        _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ErrorExit) catch unreachable);
+                    }
+                    if (irb.comp.have_err_ret_tracing and !irb.isCompTime(scope)) {
+                        _ = try irb.build(Inst.SaveErrRetAddr, scope, src_span, Inst.SaveErrRetAddr.Params{});
+                    }
+                    _ = try irb.build(Inst.Br, scope, src_span, Inst.Br.Params{
+                        .dest_block = ret_stmt_block,
+                        .is_comptime = err_is_comptime,
+                    });
+
+                    // Ok path: scope-exit defers only.
+                    try irb.setCursorAtEndAndAppendBlock(ok_block);
+                    if (have_err_defers) {
+                        _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+                    }
+                    _ = try irb.build(Inst.Br, scope, src_span, Inst.Br.Params{
+                        .dest_block = ret_stmt_block,
+                        .is_comptime = err_is_comptime,
+                    });
+
+                    try irb.setCursorAtEndAndAppendBlock(ret_stmt_block);
+                    return irb.genAsyncReturn(scope, src_span, return_value, false);
+                } else {
+                    _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+                    return irb.genAsyncReturn(scope, src_span, return_value, false);
+                }
+            },
+        }
+    }
+
+    /// Lowers an identifier: first tries primitive type names (e.g. `u8`),
+    /// then scope lookup via findIdent — a Decl becomes DeclRef, a variable
+    /// becomes VarPtr (plus LoadPtr when a value rather than a pointer is
+    /// wanted). Unresolved names produce "unknown identifier".
+    pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst {
+        const src_span = Span.token(identifier.token);
+        const name = irb.root_scope.tree.tokenSlice(identifier.token);
+
+        //if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
+        //    IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node);
+        //    const_instruction->base.value.type = get_pointer_to_type(irb->codegen,
+        //            irb->codegen->builtin_types.entry_void, false);
+        //    const_instruction->base.value.special = ConstValSpecialStatic;
+        //    const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialDiscard;
+        //    return &const_instruction->base;
+        //}
+
+        if (await (async irb.comp.getPrimitiveType(name) catch unreachable)) |result| {
+            if (result) |primitive_type| {
+                defer primitive_type.base.deref(irb.comp);
+                switch (lval) {
+                    // if (lval == LValPtr) {
+                    //     return ir_build_ref(irb, scope, node, value, false, false);
+                    LVal.Ptr => return error.Unimplemented,
+                    LVal.None => return irb.buildConstValue(scope, src_span, &primitive_type.base),
+                }
+            }
+        } else |err| switch (err) {
+            // getPrimitiveType parses names like `i123456789...`; a too-wide
+            // integer type surfaces as Overflow here.
+            error.Overflow => {
+                try irb.comp.addCompileError(irb.root_scope, src_span, "integer too large");
+                return error.SemanticAnalysisFailed;
+            },
+            error.OutOfMemory => return error.OutOfMemory,
+        }
+
+        switch (await (async irb.findIdent(scope, name) catch unreachable)) {
+            Ident.Decl => |decl| {
+                return irb.build(Inst.DeclRef, scope, src_span, Inst.DeclRef.Params{
+                    .decl = decl,
+                    .lval = lval,
+                });
+            },
+            Ident.VarScope => |var_scope| {
+                const var_ptr = try irb.build(Inst.VarPtr, scope, src_span, Inst.VarPtr.Params{ .var_scope = var_scope });
+                switch (lval) {
+                    LVal.Ptr => return var_ptr,
+                    LVal.None => {
+                        return irb.build(Inst.LoadPtr, scope, src_span, Inst.LoadPtr.Params{ .target = var_ptr });
+                    },
+                }
+            },
+            Ident.NotFound => {},
+        }
+
+        //if (node->owner->any_imports_failed) {
+        //    // skip the error message since we had a failing import in this file
+        //    // if an import breaks we don't need redundant undeclared identifier errors
+        //    return irb->codegen->invalid_instruction;
+        //}
+
+        // TODO put a variable of same name with invalid type in global scope
+        // so that future references to this same name will find a variable with an invalid type
+
+        try irb.comp.addCompileError(irb.root_scope, src_span, "unknown identifier '{}'", name);
+        return error.SemanticAnalysisFailed;
+    }
+
+    /// Result of countDefers: how many scope-exit (`defer`) and error-exit
+    /// (`errdefer`) scopes lie between an inner and an outer scope.
+    const DeferCounts = struct {
+        scope_exit: usize,
+        error_exit: usize,
 };
+    /// Counts defer/errdefer scopes on the parent chain from inner_scope up to
+    /// (but not including) outer_scope, stopping early at a FnDef boundary.
+    /// DeferExpr is unreachable: defers cannot be counted from inside a defer
+    /// expression.
+    fn countDefers(irb: *Builder, inner_scope: *Scope, outer_scope: *Scope) DeferCounts {
+        var result = DeferCounts{ .scope_exit = 0, .error_exit = 0 };
+
+        var scope = inner_scope;
+        while (scope != outer_scope) {
+            switch (scope.id) {
+                Scope.Id.Defer => {
+                    const defer_scope = @fieldParentPtr(Scope.Defer, "base", scope);
+                    switch (defer_scope.kind) {
+                        Scope.Defer.Kind.ScopeExit => result.scope_exit += 1,
+                        Scope.Defer.Kind.ErrorExit => result.error_exit += 1,
+                    }
+                    scope = scope.parent orelse break;
+                },
+                Scope.Id.FnDef => break,
+
+                Scope.Id.CompTime,
+                Scope.Id.Block,
+                Scope.Id.Decls,
+                Scope.Id.Root,
+                Scope.Id.Var,
+                => scope = scope.parent orelse break,
+
+                Scope.Id.DeferExpr => unreachable,
+            }
+        }
+        return result;
+    }
+
+ async fn genDefersForBlock(
+ irb: *Builder,
+ inner_scope: *Scope,
+ outer_scope: *Scope,
+ gen_kind: Scope.Defer.Kind,
+ ) !bool {
+ var scope = inner_scope;
+ var is_noreturn = false;
+ while (true) {
+ switch (scope.id) {
+ Scope.Id.Defer => {
+ const defer_scope = @fieldParentPtr(Scope.Defer, "base", scope);
+ const generate = switch (defer_scope.kind) {
+ Scope.Defer.Kind.ScopeExit => true,
+ Scope.Defer.Kind.ErrorExit => gen_kind == Scope.Defer.Kind.ErrorExit,
+ };
+ if (generate) {
+ const defer_expr_scope = defer_scope.defer_expr_scope;
+ const instruction = try await (async irb.genNode(
+ defer_expr_scope.expr_node,
+ &defer_expr_scope.base,
+ LVal.None,
+ ) catch unreachable);
+ if (instruction.isNoReturn()) {
+ is_noreturn = true;
+ } else {
+ _ = try irb.build(
+ Inst.CheckVoidStmt,
+ &defer_expr_scope.base,
+ Span.token(defer_expr_scope.expr_node.lastToken()),
+ Inst.CheckVoidStmt.Params{ .target = instruction },
+ );
+ }
+ }
+ },
+ Scope.Id.FnDef,
+ Scope.Id.Decls,
+ Scope.Id.Root,
+ => return is_noreturn,
+
+ Scope.Id.CompTime,
+ Scope.Id.Block,
+ Scope.Id.Var,
+ => scope = scope.parent orelse return is_noreturn,
+
+ Scope.Id.DeferExpr => unreachable,
+ }
+ }
+ }
+
+ pub fn lvalWrap(irb: *Builder, scope: *Scope, instruction: *Inst, lval: LVal) !*Inst {
+ switch (lval) {
+ LVal.None => return instruction,
+ LVal.Ptr => {
+ // We needed a pointer to a value, but we got a value. So we create
+ // an instruction which just makes a const pointer of it.
+ return irb.build(Inst.Ref, scope, instruction.span, Inst.Ref.Params{
+ .target = instruction,
+ .mut = Type.Pointer.Mut.Const,
+ .volatility = Type.Pointer.Vol.Non,
+ });
+ },
+ }
+ }
+
+ fn arena(self: *Builder) *Allocator {
+ return &self.code.arena.allocator;
+ }
+
+ fn buildExtra(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ is_generated: bool,
+ ) !*Inst {
+ const inst = try self.arena().create(I{
+ .base = Inst{
+ .id = Inst.typeToId(I),
+ .is_generated = is_generated,
+ .scope = scope,
+ .debug_id = self.next_debug_id,
+ .val = switch (I.ir_val_init) {
+ IrVal.Init.Unknown => IrVal.Unknown,
+ IrVal.Init.NoReturn => IrVal{ .KnownValue = &Value.NoReturn.get(self.comp).base },
+ IrVal.Init.Void => IrVal{ .KnownValue = &Value.Void.get(self.comp).base },
+ },
+ .ref_count = 0,
+ .span = span,
+ .child = null,
+ .parent = null,
+ .llvm_value = undefined,
+ .owner_bb = self.current_basic_block,
+ },
+ .params = params,
+ });
+
+ // Look at the params and ref() other instructions
+ comptime var i = 0;
+ inline while (i < @memberCount(I.Params)) : (i += 1) {
+ const FieldType = comptime @typeOf(@field(I.Params(undefined), @memberName(I.Params, i)));
+ switch (FieldType) {
+ *Inst => @field(inst.params, @memberName(I.Params, i)).ref(self),
+ *BasicBlock => @field(inst.params, @memberName(I.Params, i)).ref(self),
+ ?*Inst => if (@field(inst.params, @memberName(I.Params, i))) |other| other.ref(self),
+ []*Inst => {
+ // TODO https://github.com/ziglang/zig/issues/1269
+ for (@field(inst.params, @memberName(I.Params, i))) |other|
+ other.ref(self);
+ },
+ []*BasicBlock => {
+ // TODO https://github.com/ziglang/zig/issues/1269
+ for (@field(inst.params, @memberName(I.Params, i))) |other|
+ other.ref(self);
+ },
+ Type.Pointer.Mut,
+ Type.Pointer.Vol,
+ Type.Pointer.Size,
+ LVal,
+ *Decl,
+ *Scope.Var,
+ => {},
+ // it's ok to add more types here, just make sure that
+ // any instructions and basic blocks are ref'd appropriately
+ else => @compileError("unrecognized type in Params: " ++ @typeName(FieldType)),
+ }
+ }
+
+ self.next_debug_id += 1;
+ try self.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ fn build(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ ) !*Inst {
+ return self.buildExtra(I, scope, span, params, false);
+ }
+
+ fn buildGen(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ ) !*Inst {
+ return self.buildExtra(I, scope, span, params, true);
+ }
+
+ fn buildConstBool(self: *Builder, scope: *Scope, span: Span, x: bool) !*Inst {
+ const inst = try self.build(Inst.Const, scope, span, Inst.Const.Params{});
+ inst.val = IrVal{ .KnownValue = &Value.Bool.get(self.comp, x).base };
+ return inst;
+ }
+
+ fn buildConstVoid(self: *Builder, scope: *Scope, span: Span, is_generated: bool) !*Inst {
+ const inst = try self.buildExtra(Inst.Const, scope, span, Inst.Const.Params{}, is_generated);
+ inst.val = IrVal{ .KnownValue = &Value.Void.get(self.comp).base };
+ return inst;
+ }
+
+ fn buildConstValue(self: *Builder, scope: *Scope, span: Span, v: *Value) !*Inst {
+ const inst = try self.build(Inst.Const, scope, span, Inst.Const.Params{});
+ inst.val = IrVal{ .KnownValue = v.getRef() };
+ return inst;
+ }
+
+ /// If the code is explicitly set to be comptime, then builds a const bool,
+ /// otherwise builds a TestCompTime instruction.
+ fn buildTestCompTime(self: *Builder, scope: *Scope, span: Span, target: *Inst) !*Inst {
+ if (self.isCompTime(scope)) {
+ return self.buildConstBool(scope, span, true);
+ } else {
+ return self.build(
+ Inst.TestCompTime,
+ scope,
+ span,
+ Inst.TestCompTime.Params{ .target = target },
+ );
+ }
+ }
+
+ fn genAsyncReturn(irb: *Builder, scope: *Scope, span: Span, result: *Inst, is_gen: bool) !*Inst {
+ _ = irb.buildGen(
+ Inst.AddImplicitReturnType,
+ scope,
+ span,
+ Inst.AddImplicitReturnType.Params{ .target = result },
+ );
+
+ if (!irb.is_async) {
+ return irb.buildExtra(
+ Inst.Return,
+ scope,
+ span,
+ Inst.Return.Params{ .return_value = result },
+ is_gen,
+ );
+ }
+ return error.Unimplemented;
+
+ //ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
+ //IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node,
+ // get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
+ //// TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig
+ //IrInstruction *replacement_value = irb->exec->coro_handle;
+ //IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node,
+ // promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
+ // AtomicRmwOp_xchg, AtomicOrderSeqCst);
+ //ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle);
+ //IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle);
+ //IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
+ //return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final,
+ // is_comptime);
+ //// the above blocks are rendered by ir_gen after the rest of codegen
+ }
+
+ const Ident = union(enum) {
+ NotFound,
+ Decl: *Decl,
+ VarScope: *Scope.Var,
+ };
+
+ async fn findIdent(irb: *Builder, scope: *Scope, name: []const u8) Ident {
+ var s = scope;
+ while (true) {
+ switch (s.id) {
+ Scope.Id.Root => return Ident.NotFound,
+ Scope.Id.Decls => {
+ const decls = @fieldParentPtr(Scope.Decls, "base", s);
+ const table = await (async decls.getTableReadOnly() catch unreachable);
+ if (table.get(name)) |entry| {
+ return Ident{ .Decl = entry.value };
+ }
+ },
+ Scope.Id.Var => {
+ const var_scope = @fieldParentPtr(Scope.Var, "base", s);
+ if (mem.eql(u8, var_scope.name, name)) {
+ return Ident{ .VarScope = var_scope };
+ }
+ },
+ else => {},
+ }
+ s = s.parent.?;
+ }
+ }
};
+
+const Analyze = struct {
+ irb: Builder,
+ old_bb_index: usize,
+ const_predecessor_bb: ?*BasicBlock,
+ parent_basic_block: *BasicBlock,
+ instruction_index: usize,
+ src_implicit_return_type_list: std.ArrayList(*Inst),
+ explicit_return_type: ?*Type,
+
+ pub const Error = error{
+ /// This is only for when we have already reported a compile error. It is the poison value.
+ SemanticAnalysisFailed,
+
+ /// This is a placeholder - it is useful to use instead of panicking but once the compiler is
+ /// done this error code will be removed.
+ Unimplemented,
+
+ OutOfMemory,
+ };
+
+ pub fn init(comp: *Compilation, root_scope: *Scope.Root, explicit_return_type: ?*Type) !Analyze {
+ var irb = try Builder.init(comp, root_scope, null);
+ errdefer irb.abort();
+
+ return Analyze{
+ .irb = irb,
+ .old_bb_index = 0,
+ .const_predecessor_bb = null,
+ .parent_basic_block = undefined, // initialized with startBasicBlock
+ .instruction_index = undefined, // initialized with startBasicBlock
+ .src_implicit_return_type_list = std.ArrayList(*Inst).init(irb.arena()),
+ .explicit_return_type = explicit_return_type,
+ };
+ }
+
+ pub fn abort(self: *Analyze) void {
+ self.irb.abort();
+ }
+
+ pub fn getNewBasicBlock(self: *Analyze, old_bb: *BasicBlock, ref_old_instruction: ?*Inst) !*BasicBlock {
+ if (old_bb.child) |child| {
+ if (ref_old_instruction == null or child.ref_instruction != ref_old_instruction)
+ return child;
+ }
+
+ const new_bb = try self.irb.createBasicBlock(old_bb.scope, old_bb.name_hint);
+ new_bb.linkToParent(old_bb);
+ new_bb.ref_instruction = ref_old_instruction;
+ return new_bb;
+ }
+
+ pub fn startBasicBlock(self: *Analyze, old_bb: *BasicBlock, const_predecessor_bb: ?*BasicBlock) void {
+ self.instruction_index = 0;
+ self.parent_basic_block = old_bb;
+ self.const_predecessor_bb = const_predecessor_bb;
+ }
+
+ pub fn finishBasicBlock(ira: *Analyze, old_code: *Code) !void {
+ try ira.irb.code.basic_block_list.append(ira.irb.current_basic_block);
+ ira.instruction_index += 1;
+
+ while (ira.instruction_index < ira.parent_basic_block.instruction_list.len) {
+ const next_instruction = ira.parent_basic_block.instruction_list.at(ira.instruction_index);
+
+ if (!next_instruction.is_generated) {
+ try ira.addCompileError(next_instruction.span, "unreachable code");
+ break;
+ }
+ ira.instruction_index += 1;
+ }
+
+ ira.old_bb_index += 1;
+
+ var need_repeat = true;
+ while (true) {
+ while (ira.old_bb_index < old_code.basic_block_list.len) {
+ const old_bb = old_code.basic_block_list.at(ira.old_bb_index);
+ const new_bb = old_bb.child orelse {
+ ira.old_bb_index += 1;
+ continue;
+ };
+ if (new_bb.instruction_list.len != 0) {
+ ira.old_bb_index += 1;
+ continue;
+ }
+ ira.irb.current_basic_block = new_bb;
+
+ ira.startBasicBlock(old_bb, null);
+ return;
+ }
+ if (!need_repeat)
+ return;
+ need_repeat = false;
+ ira.old_bb_index = 0;
+ continue;
+ }
+ }
+
+ fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
+ return self.irb.comp.addCompileError(self.irb.root_scope, span, fmt, args);
+ }
+
+ fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type {
+ // TODO actual implementation
+ return &Type.Void.get(self.irb.comp).base;
+ }
+
+ fn implicitCast(self: *Analyze, target: *Inst, optional_dest_type: ?*Type) Analyze.Error!*Inst {
+ const dest_type = optional_dest_type orelse return target;
+ const from_type = target.getKnownType();
+ if (from_type == dest_type or from_type.id == Type.Id.NoReturn) return target;
+ return self.analyzeCast(target, target, dest_type);
+ }
+
+ fn analyzeCast(ira: *Analyze, source_instr: *Inst, target: *Inst, dest_type: *Type) !*Inst {
+ const from_type = target.getKnownType();
+
+ //if (type_is_invalid(wanted_type) || type_is_invalid(actual_type)) {
+ // return ira->codegen->invalid_instruction;
+ //}
+
+ //// perfect match or non-const to const
+ //ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
+ // source_node, false);
+ //if (const_cast_result.id == ConstCastResultIdOk) {
+ // return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false);
+ //}
+
+ //// widening conversion
+ //if (wanted_type->id == TypeTableEntryIdInt &&
+ // actual_type->id == TypeTableEntryIdInt &&
+ // wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
+ // wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
+ //{
+ // return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
+ //}
+
+ //// small enough unsigned ints can get casted to large enough signed ints
+ //if (wanted_type->id == TypeTableEntryIdInt && wanted_type->data.integral.is_signed &&
+ // actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed &&
+ // wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
+ //{
+ // return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
+ //}
+
+ //// float widening conversion
+ //if (wanted_type->id == TypeTableEntryIdFloat &&
+ // actual_type->id == TypeTableEntryIdFloat &&
+ // wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
+ //{
+ // return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from [N]T to []const T
+ //if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) {
+ // TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from *const [N]T to []const T
+ //if (is_slice(wanted_type) &&
+ // actual_type->id == TypeTableEntryIdPointer &&
+ // actual_type->data.pointer.is_const &&
+ // actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+
+ // TypeTableEntry *array_type = actual_type->data.pointer.child_type;
+
+ // if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from [N]T to *const []const T
+ //if (wanted_type->id == TypeTableEntryIdPointer &&
+ // wanted_type->data.pointer.is_const &&
+ // is_slice(wanted_type->data.pointer.child_type) &&
+ // actual_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type =
+ // wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// cast from [N]T to ?[]const T
+ //if (wanted_type->id == TypeTableEntryIdOptional &&
+ // is_slice(wanted_type->data.maybe.child_type) &&
+ // actual_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type =
+ // wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// *[N]T to [*]T
+ //if (wanted_type->id == TypeTableEntryIdPointer &&
+ // wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
+ // actual_type->id == TypeTableEntryIdPointer &&
+ // actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ // actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ // actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment &&
+ // types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ // actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ // !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ //{
+ // return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
+ //}
+
+ //// *[N]T to []T
+ //if (is_slice(wanted_type) &&
+ // actual_type->id == TypeTableEntryIdPointer &&
+ // actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ // actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(slice_ptr_type->id == TypeTableEntryIdPointer);
+ // if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ // actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ // !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ // {
+ // return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from T to ?T
+ //// note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
+ //if (wanted_type->id == TypeTableEntryIdOptional) {
+ // TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
+ // if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node,
+ // false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
+ // } else if (actual_type->id == TypeTableEntryIdComptimeInt ||
+ // actual_type->id == TypeTableEntryIdComptimeFloat)
+ // {
+ // if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) {
+ // return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
+ // } else {
+ // return ira->codegen->invalid_instruction;
+ // }
+ // } else if (wanted_child_type->id == TypeTableEntryIdPointer &&
+ // wanted_child_type->data.pointer.is_const &&
+ // (actual_type->id == TypeTableEntryIdPointer || is_container(actual_type)))
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// cast from null literal to maybe type
+ //if (wanted_type->id == TypeTableEntryIdOptional &&
+ // actual_type->id == TypeTableEntryIdNull)
+ //{
+ // return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from child type of error type to error type
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion) {
+ // if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
+ // } else if (actual_type->id == TypeTableEntryIdComptimeInt ||
+ // actual_type->id == TypeTableEntryIdComptimeFloat)
+ // {
+ // if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.error_union.payload_type, true)) {
+ // return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
+ // } else {
+ // return ira->codegen->invalid_instruction;
+ // }
+ // }
+ //}
+
+ //// cast from [N]T to E![]const T
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ // is_slice(wanted_type->data.error_union.payload_type) &&
+ // actual_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type =
+ // wanted_type->data.error_union.payload_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// cast from error set to error union type
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ // actual_type->id == TypeTableEntryIdErrorSet)
+ //{
+ // return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from T to E!?T
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ // wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
+ // actual_type->id != TypeTableEntryIdOptional)
+ //{
+ // TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
+ // if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk ||
+ // actual_type->id == TypeTableEntryIdNull ||
+ // actual_type->id == TypeTableEntryIdComptimeInt ||
+ // actual_type->id == TypeTableEntryIdComptimeFloat)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ // cast from comptime-known integer to another integer where the value fits
+ if (target.isCompTime() and (from_type.id == Type.Id.Int or from_type.id == Type.Id.ComptimeInt)) cast: {
+ const target_val = target.val.KnownValue;
+ const from_int = &target_val.cast(Value.Int).?.big_int;
+ const fits = fits: {
+ if (dest_type.cast(Type.ComptimeInt)) |ctint| {
+ break :fits true;
+ }
+ if (dest_type.cast(Type.Int)) |int| {
+ break :fits from_int.fitsInTwosComp(int.key.is_signed, int.key.bit_count);
+ }
+ break :cast;
+ };
+ if (!fits) {
+ try ira.addCompileError(
+ source_instr.span,
+ "integer value '{}' cannot be stored in type '{}'",
+ from_int,
+ dest_type.name,
+ );
+ return error.SemanticAnalysisFailed;
+ }
+
+ const new_val = try target.copyVal(ira.irb.comp);
+ new_val.setType(dest_type, ira.irb.comp);
+ return ira.irb.buildConstValue(source_instr.scope, source_instr.span, new_val);
+ }
+
+ // cast from number literal to another type
+ // cast from number literal to *const integer
+ //if (actual_type->id == TypeTableEntryIdComptimeFloat ||
+ // actual_type->id == TypeTableEntryIdComptimeInt)
+ //{
+ // ensure_complete_type(ira->codegen, wanted_type);
+ // if (type_is_invalid(wanted_type))
+ // return ira->codegen->invalid_instruction;
+ // if (wanted_type->id == TypeTableEntryIdEnum) {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.enumeration.tag_int_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // } else if (wanted_type->id == TypeTableEntryIdPointer &&
+ // wanted_type->data.pointer.is_const)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // } else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, true)) {
+ // CastOp op;
+ // if ((actual_type->id == TypeTableEntryIdComptimeFloat &&
+ // wanted_type->id == TypeTableEntryIdFloat) ||
+ // (actual_type->id == TypeTableEntryIdComptimeInt &&
+ // wanted_type->id == TypeTableEntryIdInt))
+ // {
+ // op = CastOpNumLitToConcrete;
+ // } else if (wanted_type->id == TypeTableEntryIdInt) {
+ // op = CastOpFloatToInt;
+ // } else if (wanted_type->id == TypeTableEntryIdFloat) {
+ // op = CastOpIntToFloat;
+ // } else {
+ // zig_unreachable();
+ // }
+ // return ir_resolve_cast(ira, source_instr, value, wanted_type, op, false);
+ // } else {
+ // return ira->codegen->invalid_instruction;
+ // }
+ //}
+
+ //// cast from typed number to integer or float literal.
+ //// works when the number is known at compile time
+ //if (instr_is_comptime(value) &&
+ // ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) ||
+ // (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat)))
+ //{
+ // return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from union to the enum type of the union
+ //if (actual_type->id == TypeTableEntryIdUnion && wanted_type->id == TypeTableEntryIdEnum) {
+ // type_ensure_zero_bits_known(ira->codegen, actual_type);
+ // if (type_is_invalid(actual_type))
+ // return ira->codegen->invalid_instruction;
+
+ // if (actual_type->data.unionation.tag_type == wanted_type) {
+ // return ir_analyze_union_to_tag(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// enum to union which has the enum as the tag type
+ //if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
+ // (wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
+ // wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
+ //{
+ // type_ensure_zero_bits_known(ira->codegen, wanted_type);
+ // if (wanted_type->data.unionation.tag_type == actual_type) {
+ // return ir_analyze_enum_to_union(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// enum to &const union which has the enum as the tag type
+ //if (actual_type->id == TypeTableEntryIdEnum && wanted_type->id == TypeTableEntryIdPointer) {
+ // TypeTableEntry *union_type = wanted_type->data.pointer.child_type;
+ // if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
+ // union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
+ // {
+ // type_ensure_zero_bits_known(ira->codegen, union_type);
+ // if (union_type->data.unionation.tag_type == actual_type) {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, union_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ // }
+ //}
+
+ //// cast from *T to *[1]T
+ //if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ // actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle)
+ //{
+ // TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
+ // if (array_type->id == TypeTableEntryIdArray && array_type->data.array.len == 1 &&
+ // types_match_const_cast_only(ira, array_type->data.array.child_type,
+ // actual_type->data.pointer.child_type, source_node,
+ // !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ // {
+ // if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) {
+ // ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
+ // add_error_note(ira->codegen, msg, value->source_node,
+ // buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name),
+ // actual_type->data.pointer.alignment));
+ // add_error_note(ira->codegen, msg, source_instr->source_node,
+ // buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name),
+ // wanted_type->data.pointer.alignment));
+ // return ira->codegen->invalid_instruction;
+ // }
+ // return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from T to *T where T is zero bits
+ //if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ // types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ // actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ //{
+ // type_ensure_zero_bits_known(ira->codegen, actual_type);
+ // if (type_is_invalid(actual_type)) {
+ // return ira->codegen->invalid_instruction;
+ // }
+ // if (!type_has_bits(actual_type)) {
+ // return ir_get_ref(ira, source_instr, value, false, false);
+ // }
+ //}
+
+ //// cast from undefined to anything
+ //if (actual_type->id == TypeTableEntryIdUndefined) {
+ // return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from something to const pointer of it
+ //if (!type_requires_comptime(actual_type)) {
+ // TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
+ // if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node, false).id == ConstCastResultIdOk) {
+ // return ir_analyze_cast_ref(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ try ira.addCompileError(
+ source_instr.span,
+ "expected type '{}', found '{}'",
+ dest_type.name,
+ from_type.name,
+ );
+ //ErrorMsg *parent_msg = ir_add_error_node(ira, source_instr->source_node,
+ // buf_sprintf("expected type '%s', found '%s'",
+ // buf_ptr(&wanted_type->name),
+ // buf_ptr(&actual_type->name)));
+ //report_recursive_error(ira, source_instr->source_node, &const_cast_result, parent_msg);
+ return error.SemanticAnalysisFailed;
+ }
+
+ fn getCompTimeValOrNullUndefOk(self: *Analyze, target: *Inst) ?*Value {
+ @panic("TODO");
+ }
+
+ fn getCompTimeRef(
+ self: *Analyze,
+ value: *Value,
+ ptr_mut: Value.Ptr.Mut,
+ mut: Type.Pointer.Mut,
+ volatility: Type.Pointer.Vol,
+ ) Analyze.Error!*Inst {
+ return error.Unimplemented;
+ }
+};
+
+pub async fn gen(
+ comp: *Compilation,
+ body_node: *ast.Node,
+ scope: *Scope,
+) !*Code {
+ var irb = try Builder.init(comp, scope.findRoot(), scope);
+ errdefer irb.abort();
+
+ const entry_block = try irb.createBasicBlock(scope, c"Entry");
+ entry_block.ref(&irb); // Entry block gets a reference because we enter it to begin.
+ try irb.setCursorAtEndAndAppendBlock(entry_block);
+
+ const result = try await (async irb.genNode(body_node, scope, LVal.None) catch unreachable);
+ if (!result.isNoReturn()) {
+ // no need for save_err_ret_addr because this cannot return error
+ _ = try irb.genAsyncReturn(scope, Span.token(body_node.lastToken()), result, true);
+ }
+
+ return irb.finish();
+}
+
+pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code {
+ const old_entry_bb = old_code.basic_block_list.at(0);
+ const root_scope = old_entry_bb.scope.findRoot();
+
+ var ira = try Analyze.init(comp, root_scope, expected_type);
+ errdefer ira.abort();
+
+ const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null);
+ new_entry_bb.ref(&ira.irb);
+
+ ira.irb.current_basic_block = new_entry_bb;
+
+ ira.startBasicBlock(old_entry_bb, null);
+
+ while (ira.old_bb_index < old_code.basic_block_list.len) {
+ const old_instruction = ira.parent_basic_block.instruction_list.at(ira.instruction_index);
+
+ if (old_instruction.ref_count == 0 and !old_instruction.hasSideEffects()) {
+ ira.instruction_index += 1;
+ continue;
+ }
+
+ const return_inst = try await (async old_instruction.analyze(&ira) catch unreachable);
+ assert(return_inst.val != IrVal.Unknown); // at least the type should be known at this point
+ return_inst.linkToParent(old_instruction);
+ // Note: if we ever modify the above to handle error.CompileError by continuing analysis,
+ // then here we want to check if ira.isCompTime() and return early if true
+
+ if (return_inst.isNoReturn()) {
+ try ira.finishBasicBlock(old_code);
+ continue;
+ }
+
+ ira.instruction_index += 1;
+ }
+
+ if (ira.src_implicit_return_type_list.len == 0) {
+ ira.irb.code.return_type = &Type.NoReturn.get(comp).base;
+ return ira.irb.finish();
+ }
+
+ ira.irb.code.return_type = try ira.resolvePeerTypes(expected_type, ira.src_implicit_return_type_list.toSliceConst());
+ return ira.irb.finish();
+}
diff --git a/src-self-hosted/libc_installation.zig b/src-self-hosted/libc_installation.zig
new file mode 100644
index 0000000000..3938c0d90c
--- /dev/null
+++ b/src-self-hosted/libc_installation.zig
@@ -0,0 +1,462 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const event = std.event;
+const Target = @import("target.zig").Target;
+const c = @import("c.zig");
+
+/// See the render function implementation for documentation of the fields.
+/// See the render function implementation for documentation of the fields.
+pub const LibCInstallation = struct {
+    // Directory containing stdlib.h. Always set after a successful parse/find.
+    include_dir: []const u8,
+    // Directory containing crt1.o (null when not needed, e.g. MacOS).
+    lib_dir: ?[]const u8,
+    // Directory containing crtbegin.o (null on MacOS/Windows).
+    static_lib_dir: ?[]const u8,
+    // Directory containing vcruntime.lib (Windows only).
+    msvc_lib_dir: ?[]const u8,
+    // Directory containing kernel32.lib (Windows only).
+    kernel32_lib_dir: ?[]const u8,
+    // Full path to the target's dynamic linker (Linux only).
+    dynamic_linker_path: ?[]const u8,
+
+    pub const FindError = error{
+        OutOfMemory,
+        FileSystem,
+        UnableToSpawnCCompiler,
+        CCompilerExitCode,
+        CCompilerCrashed,
+        CCompilerCannotFindHeaders,
+        LibCRuntimeNotFound,
+        LibCStdLibHeaderNotFound,
+        LibCKernel32LibNotFound,
+        UnsupportedArchitecture,
+    };
+
+    /// Parses a libc description file (key=value lines; '#' starts a comment)
+    /// into `self`. Human-readable problems are printed to `stderr` before
+    /// returning error.ParseError. Every key listed in `keys` below must be
+    /// present; optional fields may have an empty value. On error, any values
+    /// allocated so far are freed and `self` is reset to empty.
+    pub fn parse(
+        self: *LibCInstallation,
+        allocator: *std.mem.Allocator,
+        libc_file: []const u8,
+        stderr: *std.io.OutStream(std.io.FileOutStream.Error),
+    ) !void {
+        self.initEmpty();
+
+        // Must match the field names of LibCInstallation; @field below relies on it.
+        const keys = []const []const u8{
+            "include_dir",
+            "lib_dir",
+            "static_lib_dir",
+            "msvc_lib_dir",
+            "kernel32_lib_dir",
+            "dynamic_linker_path",
+        };
+        const FoundKey = struct {
+            found: bool,
+            allocated: ?[]u8,
+        };
+        var found_keys = [1]FoundKey{FoundKey{ .found = false, .allocated = null }} ** keys.len;
+        errdefer {
+            self.initEmpty();
+            for (found_keys) |found_key| {
+                if (found_key.allocated) |s| allocator.free(s);
+            }
+        }
+
+        const contents = try std.io.readFileAlloc(allocator, libc_file);
+        defer allocator.free(contents);
+
+        var it = std.mem.split(contents, "\n");
+        while (it.next()) |line| {
+            if (line.len == 0 or line[0] == '#') continue;
+            var line_it = std.mem.split(line, "=");
+            // NOTE(review): split always yields at least one token, so this
+            // orelse branch looks unreachable; a line without '=' instead ends
+            // up with an empty `value` — confirm the intended diagnostic.
+            const name = line_it.next() orelse {
+                try stderr.print("missing equal sign after field name\n");
+                return error.ParseError;
+            };
+            const value = line_it.rest();
+            // Comptime-unrolled dispatch on the field name.
+            inline for (keys) |key, i| {
+                if (std.mem.eql(u8, name, key)) {
+                    found_keys[i].found = true;
+                    switch (@typeInfo(@typeOf(@field(self, key)))) {
+                        builtin.TypeId.Optional => {
+                            // Optional fields: empty value means "not set".
+                            if (value.len == 0) {
+                                @field(self, key) = null;
+                            } else {
+                                found_keys[i].allocated = try std.mem.dupe(allocator, u8, value);
+                                @field(self, key) = found_keys[i].allocated;
+                            }
+                        },
+                        else => {
+                            if (value.len == 0) {
+                                try stderr.print("field cannot be empty: {}\n", key);
+                                return error.ParseError;
+                            }
+                            const dupe = try std.mem.dupe(allocator, u8, value);
+                            found_keys[i].allocated = dupe;
+                            @field(self, key) = dupe;
+                        },
+                    }
+                    break;
+                }
+            }
+        }
+        // Every key must appear in the file, even if its value is empty.
+        for (found_keys) |found_key, i| {
+            if (!found_key.found) {
+                try stderr.print("missing field: {}\n", keys[i]);
+                return error.ParseError;
+            }
+        }
+    }
+
+    /// Writes `self` to `out` in the key=value format that `parse` accepts,
+    /// with an explanatory comment above each field. Optional fields render
+    /// as empty; a null dynamic linker falls back to the native target's
+    /// default dynamic linker path.
+    pub fn render(self: *const LibCInstallation, out: *std.io.OutStream(std.io.FileOutStream.Error)) !void {
+        @setEvalBranchQuota(4000);
+        try out.print(
+            \\# The directory that contains `stdlib.h`.
+            \\# On Linux, can be found with: `cc -E -Wp,-v -xc /dev/null`
+            \\include_dir={}
+            \\
+            \\# The directory that contains `crt1.o`.
+            \\# On Linux, can be found with `cc -print-file-name=crt1.o`.
+            \\# Not needed when targeting MacOS.
+            \\lib_dir={}
+            \\
+            \\# The directory that contains `crtbegin.o`.
+            \\# On Linux, can be found with `cc -print-file-name=crtbegin.o`.
+            \\# Not needed when targeting MacOS or Windows.
+            \\static_lib_dir={}
+            \\
+            \\# The directory that contains `vcruntime.lib`.
+            \\# Only needed when targeting Windows.
+            \\msvc_lib_dir={}
+            \\
+            \\# The directory that contains `kernel32.lib`.
+            \\# Only needed when targeting Windows.
+            \\kernel32_lib_dir={}
+            \\
+            \\# The full path to the dynamic linker, on the target system.
+            \\# Only needed when targeting Linux.
+            \\dynamic_linker_path={}
+            \\
+        ,
+            self.include_dir,
+            self.lib_dir orelse "",
+            self.static_lib_dir orelse "",
+            self.msvc_lib_dir orelse "",
+            self.kernel32_lib_dir orelse "",
+            self.dynamic_linker_path orelse Target(Target.Native).getDynamicLinkerPath(),
+        );
+    }
+
+    /// Finds the default, native libc.
+    /// OS-specific probes run concurrently in an event.Group; the function
+    /// completes when all of them have finished (or propagates the first error).
+    pub async fn findNative(self: *LibCInstallation, loop: *event.Loop) !void {
+        self.initEmpty();
+        var group = event.Group(FindError!void).init(loop);
+        errdefer group.cancelAll();
+        var windows_sdk: ?*c.ZigWindowsSDK = null;
+        errdefer if (windows_sdk) |sdk| c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk));
+
+        switch (builtin.os) {
+            builtin.Os.windows => {
+                var sdk: *c.ZigWindowsSDK = undefined;
+                switch (c.zig_find_windows_sdk(@ptrCast(?[*]?[*]c.ZigWindowsSDK, &sdk))) {
+                    c.ZigFindWindowsSdkError.None => {
+                        windows_sdk = sdk;
+
+                        if (sdk.msvc_lib_dir_ptr) |ptr| {
+                            self.msvc_lib_dir = try std.mem.dupe(loop.allocator, u8, ptr[0..sdk.msvc_lib_dir_len]);
+                        }
+                        try group.call(findNativeKernel32LibDir, self, loop, sdk);
+                        try group.call(findNativeIncludeDirWindows, self, loop, sdk);
+                        try group.call(findNativeLibDirWindows, self, loop, sdk);
+                    },
+                    c.ZigFindWindowsSdkError.OutOfMemory => return error.OutOfMemory,
+                    c.ZigFindWindowsSdkError.NotFound => return error.NotFound,
+                    c.ZigFindWindowsSdkError.PathTooLong => return error.NotFound,
+                }
+            },
+            builtin.Os.linux => {
+                try group.call(findNativeIncludeDirLinux, self, loop);
+                try group.call(findNativeLibDirLinux, self, loop);
+                try group.call(findNativeStaticLibDir, self, loop);
+                try group.call(findNativeDynamicLinker, self, loop);
+            },
+            builtin.Os.macosx => {
+                // MacOS: headers live in a fixed location; no crt/linker probing needed.
+                self.include_dir = try std.mem.dupe(loop.allocator, u8, "/usr/include");
+            },
+            else => @compileError("unimplemented: find libc for this OS"),
+        }
+        return await (async group.wait() catch unreachable);
+    }
+
+    /// Runs `$CC -E -Wp,-v -xc /dev/null` and scans the include search paths
+    /// it reports (lines starting with a space on stderr), in reverse order,
+    /// for a directory that contains stdlib.h.
+    async fn findNativeIncludeDirLinux(self: *LibCInstallation, loop: *event.Loop) !void {
+        const cc_exe = std.os.getEnvPosix("CC") orelse "cc";
+        const argv = []const []const u8{
+            cc_exe,
+            "-E",
+            "-Wp,-v",
+            "-xc",
+            "/dev/null",
+        };
+        // TODO make this use event loop
+        const errorable_result = std.os.ChildProcess.exec(loop.allocator, argv, null, null, 1024 * 1024);
+        const exec_result = if (std.debug.runtime_safety) blk: {
+            break :blk errorable_result catch unreachable;
+        } else blk: {
+            break :blk errorable_result catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                else => return error.UnableToSpawnCCompiler,
+            };
+        };
+        defer {
+            loop.allocator.free(exec_result.stdout);
+            loop.allocator.free(exec_result.stderr);
+        }
+
+        switch (exec_result.term) {
+            std.os.ChildProcess.Term.Exited => |code| {
+                if (code != 0) return error.CCompilerExitCode;
+            },
+            else => {
+                return error.CCompilerCrashed;
+            },
+        }
+
+        var it = std.mem.split(exec_result.stderr, "\n\r");
+        var search_paths = std.ArrayList([]const u8).init(loop.allocator);
+        defer search_paths.deinit();
+        while (it.next()) |line| {
+            // The compiler indents each search path with a leading space.
+            if (line.len != 0 and line[0] == ' ') {
+                try search_paths.append(line);
+            }
+        }
+        if (search_paths.len == 0) {
+            return error.CCompilerCannotFindHeaders;
+        }
+
+        // search in reverse order
+        var path_i: usize = 0;
+        while (path_i < search_paths.len) : (path_i += 1) {
+            const search_path_untrimmed = search_paths.at(search_paths.len - path_i - 1);
+            const search_path = std.mem.trimLeft(u8, search_path_untrimmed, " ");
+            const stdlib_path = try std.os.path.join(loop.allocator, search_path, "stdlib.h");
+            defer loop.allocator.free(stdlib_path);
+
+            if (try fileExists(loop.allocator, stdlib_path)) {
+                self.include_dir = try std.mem.dupe(loop.allocator, u8, search_path);
+                return;
+            }
+        }
+
+        return error.LibCStdLibHeaderNotFound;
+    }
+
+    /// Searches the detected Windows SDK installations for the UCRT include
+    /// directory (the one containing stdlib.h).
+    async fn findNativeIncludeDirWindows(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) !void {
+        var search_buf: [2]Search = undefined;
+        const searches = fillSearch(&search_buf, sdk);
+
+        var result_buf = try std.Buffer.initSize(loop.allocator, 0);
+        defer result_buf.deinit();
+
+        for (searches) |search| {
+            result_buf.shrink(0);
+            const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+            try stream.print("{}\\Include\\{}\\ucrt", search.path, search.version);
+
+            const stdlib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "stdlib.h");
+            defer loop.allocator.free(stdlib_path);
+
+            if (try fileExists(loop.allocator, stdlib_path)) {
+                self.include_dir = result_buf.toOwnedSlice();
+                return;
+            }
+        }
+
+        return error.LibCStdLibHeaderNotFound;
+    }
+
+    /// Searches the Windows SDK installations for the UCRT library directory
+    /// (the one containing ucrt.lib) matching the build architecture.
+    async fn findNativeLibDirWindows(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void {
+        var search_buf: [2]Search = undefined;
+        const searches = fillSearch(&search_buf, sdk);
+
+        var result_buf = try std.Buffer.initSize(loop.allocator, 0);
+        defer result_buf.deinit();
+
+        for (searches) |search| {
+            result_buf.shrink(0);
+            const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+            try stream.print("{}\\Lib\\{}\\ucrt\\", search.path, search.version);
+            switch (builtin.arch) {
+                builtin.Arch.i386 => try stream.write("x86"),
+                builtin.Arch.x86_64 => try stream.write("x64"),
+                builtin.Arch.aarch64 => try stream.write("arm"),
+                else => return error.UnsupportedArchitecture,
+            }
+            const ucrt_lib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "ucrt.lib");
+            defer loop.allocator.free(ucrt_lib_path);
+            if (try fileExists(loop.allocator, ucrt_lib_path)) {
+                self.lib_dir = result_buf.toOwnedSlice();
+                return;
+            }
+        }
+        return error.LibCRuntimeNotFound;
+    }
+
+    /// lib_dir is the directory containing crt1.o, as reported by the C compiler.
+    async fn findNativeLibDirLinux(self: *LibCInstallation, loop: *event.Loop) FindError!void {
+        self.lib_dir = try await (async ccPrintFileName(loop, "crt1.o", true) catch unreachable);
+    }
+
+    /// static_lib_dir is the directory containing crtbegin.o, as reported by the C compiler.
+    async fn findNativeStaticLibDir(self: *LibCInstallation, loop: *event.Loop) FindError!void {
+        self.static_lib_dir = try await (async ccPrintFileName(loop, "crtbegin.o", true) catch unreachable);
+    }
+
+    /// Probes the known dynamic linker candidates (glibc, then musl)
+    /// concurrently; after all probes finish, the first candidate (in array
+    /// order) that was found wins. Leaves dynamic_linker_path null if none hit.
+    async fn findNativeDynamicLinker(self: *LibCInstallation, loop: *event.Loop) FindError!void {
+        var dyn_tests = []DynTest{
+            DynTest{
+                .name = "ld-linux-x86-64.so.2",
+                .result = null,
+            },
+            DynTest{
+                .name = "ld-musl-x86_64.so.1",
+                .result = null,
+            },
+        };
+        var group = event.Group(FindError!void).init(loop);
+        errdefer group.cancelAll();
+        for (dyn_tests) |*dyn_test| {
+            try group.call(testNativeDynamicLinker, self, loop, dyn_test);
+        }
+        try await (async group.wait() catch unreachable);
+        for (dyn_tests) |*dyn_test| {
+            if (dyn_test.result) |result| {
+                self.dynamic_linker_path = result;
+                return;
+            }
+        }
+    }
+
+    // One dynamic-linker candidate: its file name and, once probed, the
+    // resolved path (null when the C compiler could not locate it).
+    const DynTest = struct {
+        name: []const u8,
+        result: ?[]const u8,
+    };
+
+    /// Group task for one candidate; LibCRuntimeNotFound is deliberately
+    /// non-fatal here (the candidate simply stays unresolved).
+    async fn testNativeDynamicLinker(self: *LibCInstallation, loop: *event.Loop, dyn_test: *DynTest) FindError!void {
+        if (await (async ccPrintFileName(loop, dyn_test.name, false) catch unreachable)) |result| {
+            dyn_test.result = result;
+            return;
+        } else |err| switch (err) {
+            error.LibCRuntimeNotFound => return,
+            else => return err,
+        }
+    }
+
+
+    /// Searches the Windows SDK installations for the directory containing
+    /// kernel32.lib for the build architecture.
+    async fn findNativeKernel32LibDir(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void {
+        var search_buf: [2]Search = undefined;
+        const searches = fillSearch(&search_buf, sdk);
+
+        var result_buf = try std.Buffer.initSize(loop.allocator, 0);
+        defer result_buf.deinit();
+
+        for (searches) |search| {
+            result_buf.shrink(0);
+            const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+            try stream.print("{}\\Lib\\{}\\um\\", search.path, search.version);
+            switch (builtin.arch) {
+                builtin.Arch.i386 => try stream.write("x86\\"),
+                builtin.Arch.x86_64 => try stream.write("x64\\"),
+                builtin.Arch.aarch64 => try stream.write("arm\\"),
+                else => return error.UnsupportedArchitecture,
+            }
+            const kernel32_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "kernel32.lib");
+            defer loop.allocator.free(kernel32_path);
+            if (try fileExists(loop.allocator, kernel32_path)) {
+                self.kernel32_lib_dir = result_buf.toOwnedSlice();
+                return;
+            }
+        }
+        return error.LibCKernel32LibNotFound;
+    }
+
+    /// Resets all optional fields to null; include_dir becomes a zero-length
+    /// slice over an undefined pointer (never to be dereferenced while empty).
+    fn initEmpty(self: *LibCInstallation) void {
+        self.* = LibCInstallation{
+            .include_dir = ([*]const u8)(undefined)[0..0],
+            .lib_dir = null,
+            .static_lib_dir = null,
+            .msvc_lib_dir = null,
+            .kernel32_lib_dir = null,
+            .dynamic_linker_path = null,
+        };
+    }
+};
+
+/// caller owns returned memory
+/// caller owns returned memory
+/// Runs `$CC -print-file-name=<o_file>` and returns either the directory that
+/// contains the file (when `want_dirname`) or the full path itself. Returns
+/// error.LibCRuntimeNotFound when the compiler does not know the file.
+async fn ccPrintFileName(loop: *event.Loop, o_file: []const u8, want_dirname: bool) ![]u8 {
+    const cc_exe = std.os.getEnvPosix("CC") orelse "cc";
+    const arg1 = try std.fmt.allocPrint(loop.allocator, "-print-file-name={}", o_file);
+    defer loop.allocator.free(arg1);
+    const argv = []const []const u8{ cc_exe, arg1 };
+
+    // TODO This simulates evented I/O for the child process exec
+    await (async loop.yield() catch unreachable);
+    const errorable_result = std.os.ChildProcess.exec(loop.allocator, argv, null, null, 1024 * 1024);
+    const exec_result = if (std.debug.runtime_safety) blk: {
+        break :blk errorable_result catch unreachable;
+    } else blk: {
+        break :blk errorable_result catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
+            else => return error.UnableToSpawnCCompiler,
+        };
+    };
+    defer {
+        loop.allocator.free(exec_result.stdout);
+        loop.allocator.free(exec_result.stderr);
+    }
+    switch (exec_result.term) {
+        std.os.ChildProcess.Term.Exited => |code| {
+            if (code != 0) return error.CCompilerExitCode;
+        },
+        else => {
+            return error.CCompilerCrashed;
+        },
+    }
+    // Only the first output line matters; when the compiler cannot find the
+    // file it echoes the bare name back, which has no dirname and fails below.
+    var it = std.mem.split(exec_result.stdout, "\n\r");
+    const line = it.next() orelse return error.LibCRuntimeNotFound;
+    const dirname = std.os.path.dirname(line) orelse return error.LibCRuntimeNotFound;
+
+    if (want_dirname) {
+        return std.mem.dupe(loop.allocator, u8, dirname);
+    } else {
+        return std.mem.dupe(loop.allocator, u8, line);
+    }
+}
+
+/// One Windows SDK installation root plus the version subdirectory name used
+/// when composing Include/ and Lib/ paths.
+const Search = struct {
+    path: []const u8,
+    version: []const u8,
+};
+
+/// Fills `search_buf` with the SDK roots that are fully known (path AND
+/// version), preferring SDK 10 over 8.1, and returns the populated prefix.
+/// May return an empty slice when neither SDK was detected.
+fn fillSearch(search_buf: *[2]Search, sdk: *c.ZigWindowsSDK) []Search {
+    var search_end: usize = 0;
+    if (sdk.path10_ptr) |path10_ptr| {
+        if (sdk.version10_ptr) |ver10_ptr| {
+            search_buf[search_end] = Search{
+                .path = path10_ptr[0..sdk.path10_len],
+                .version = ver10_ptr[0..sdk.version10_len],
+            };
+            search_end += 1;
+        }
+    }
+    if (sdk.path81_ptr) |path81_ptr| {
+        if (sdk.version81_ptr) |ver81_ptr| {
+            search_buf[search_end] = Search{
+                .path = path81_ptr[0..sdk.path81_len],
+                .version = ver81_ptr[0..sdk.version81_len],
+            };
+            search_end += 1;
+        }
+    }
+    return search_buf[0..search_end];
+}
+
+
+/// Returns whether `path` is accessible. NotFound and PermissionDenied map to
+/// false; OutOfMemory propagates; every other access error becomes
+/// error.FileSystem.
+fn fileExists(allocator: *std.mem.Allocator, path: []const u8) !bool {
+    if (std.os.File.access(allocator, path)) |_| {
+        return true;
+    } else |err| switch (err) {
+        error.NotFound, error.PermissionDenied => return false,
+        error.OutOfMemory => return error.OutOfMemory,
+        else => return error.FileSystem,
+    }
+}
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
new file mode 100644
index 0000000000..3b79c5b891
--- /dev/null
+++ b/src-self-hosted/link.zig
@@ -0,0 +1,737 @@
+const std = @import("std");
+const mem = std.mem;
+const c = @import("c.zig");
+const builtin = @import("builtin");
+const ObjectFormat = builtin.ObjectFormat;
+const Compilation = @import("compilation.zig").Compilation;
+const Target = @import("target.zig").Target;
+const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
+const assert = std.debug.assert;
+
+// Per-link state shared between link() and the constructLinkerArgs* helpers.
+const Context = struct {
+    comp: *Compilation,
+    // Arena backing args, out_file_path and all temporary strings; freed wholesale.
+    arena: std.heap.ArenaAllocator,
+    // Null-terminated argv handed to LLD.
+    args: std.ArrayList([*]const u8),
+    // Whether C runtime startup objects (crt1.o etc.) are linked in.
+    link_in_crt: bool,
+
+    // Error stash for the LLD diagnostic callback, which cannot return errors.
+    link_err: error{OutOfMemory}!void,
+    // Accumulated diagnostic text from LLD (null until first message).
+    link_msg: std.Buffer,
+
+    libc: *LibCInstallation,
+    out_file_path: std.Buffer,
+};
+
+/// Links the compilation's objects into the final output by invoking LLD
+/// in-process. Builds the argv for the target's object format, resolves the
+/// libc installation when needed, and serializes all LLD invocations behind a
+/// global lock (LLD is not thread-safe). Returns error.LinkFailed when LLD
+/// reports failure.
+pub async fn link(comp: *Compilation) !void {
+    var ctx = Context{
+        .comp = comp,
+        .arena = std.heap.ArenaAllocator.init(comp.gpa()),
+        .args = undefined,
+        .link_in_crt = comp.haveLibC() and comp.kind == Compilation.Kind.Exe,
+        .link_err = {},
+        .link_msg = undefined,
+        .libc = undefined,
+        .out_file_path = undefined,
+    };
+    defer ctx.arena.deinit();
+    ctx.args = std.ArrayList([*]const u8).init(&ctx.arena.allocator);
+    ctx.link_msg = std.Buffer.initNull(&ctx.arena.allocator);
+
+    // Output path: explicit override, or compilation name + kind-specific extension.
+    if (comp.link_out_file) |out_file| {
+        ctx.out_file_path = try std.Buffer.init(&ctx.arena.allocator, out_file);
+    } else {
+        ctx.out_file_path = try std.Buffer.init(&ctx.arena.allocator, comp.name.toSliceConst());
+        switch (comp.kind) {
+            Compilation.Kind.Exe => {
+                try ctx.out_file_path.append(comp.target.exeFileExt());
+            },
+            Compilation.Kind.Lib => {
+                try ctx.out_file_path.append(comp.target.libFileExt(comp.is_static));
+            },
+            Compilation.Kind.Obj => {
+                try ctx.out_file_path.append(comp.target.objFileExt());
+            },
+        }
+    }
+
+    // even though we're calling LLD as a library it thinks the first
+    // argument is its own exe name
+    try ctx.args.append(c"lld");
+
+    // Resolve libc: an explicit override wins; otherwise only the native
+    // target can auto-detect one.
+    if (comp.haveLibC()) {
+        ctx.libc = ctx.comp.override_libc orelse blk: {
+            switch (comp.target) {
+                Target.Native => {
+                    break :blk (await (async comp.event_loop_local.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound;
+                },
+                else => return error.LibCRequiredButNotProvidedOrFound,
+            }
+        };
+    }
+
+    try constructLinkerArgs(&ctx);
+
+    if (comp.verbose_link) {
+        for (ctx.args.toSliceConst()) |arg, i| {
+            const space = if (i == 0) "" else " ";
+            std.debug.warn("{}{s}", space, arg);
+        }
+        std.debug.warn("\n");
+    }
+
+    const extern_ofmt = toExternObjectFormatType(comp.target.getObjectFormat());
+    const args_slice = ctx.args.toSlice();
+
+    {
+        // LLD is not thread-safe, so we grab a global lock.
+        const held = await (async comp.event_loop_local.lld_lock.acquire() catch unreachable);
+        defer held.release();
+
+        // Not evented I/O. LLD does its own multithreading internally.
+        if (!ZigLLDLink(extern_ofmt, args_slice.ptr, args_slice.len, linkDiagCallback, @ptrCast(*c_void, &ctx))) {
+            if (!ctx.link_msg.isNull()) {
+                // TODO capture these messages and pass them through the system, reporting them through the
+                // event system instead of printing them directly here.
+                // perhaps try to parse and understand them.
+                std.debug.warn("{}\n", ctx.link_msg.toSliceConst());
+            }
+            return error.LinkFailed;
+        }
+    }
+}
+
+/// C entry point into LLD (implemented on the C++ side). Returns true on
+/// successful link; diagnostics are streamed through `append_diagnostic`.
+extern fn ZigLLDLink(
+    oformat: c.ZigLLVM_ObjectFormatType,
+    args: [*]const [*]const u8,
+    arg_count: usize,
+    append_diagnostic: extern fn (*c_void, [*]const u8, usize) void,
+    context: *c_void,
+) bool;
+
+/// Diagnostic callback invoked by LLD. It cannot return an error across the
+/// C boundary, so any allocation failure is stashed in ctx.link_err.
+extern fn linkDiagCallback(context: *c_void, ptr: [*]const u8, len: usize) void {
+    const ctx = @ptrCast(*Context, @alignCast(@alignOf(Context), context));
+    ctx.link_err = linkDiagCallbackErrorable(ctx, ptr[0..len]);
+}
+
+/// Errorable body of linkDiagCallback: lazily initializes link_msg on the
+/// first message, then appends.
+fn linkDiagCallbackErrorable(ctx: *Context, msg: []const u8) !void {
+    if (ctx.link_msg.isNull()) {
+        try ctx.link_msg.resize(0);
+    }
+    try ctx.link_msg.append(msg);
+}
+
+/// Maps the builtin object-format enum to the C enum LLD expects.
+fn toExternObjectFormatType(ofmt: ObjectFormat) c.ZigLLVM_ObjectFormatType {
+    return switch (ofmt) {
+        ObjectFormat.unknown => c.ZigLLVM_UnknownObjectFormat,
+        ObjectFormat.coff => c.ZigLLVM_COFF,
+        ObjectFormat.elf => c.ZigLLVM_ELF,
+        ObjectFormat.macho => c.ZigLLVM_MachO,
+        ObjectFormat.wasm => c.ZigLLVM_Wasm,
+    };
+}
+
+/// Dispatches to the object-format-specific argv builder.
+fn constructLinkerArgs(ctx: *Context) !void {
+    switch (ctx.comp.target.getObjectFormat()) {
+        ObjectFormat.unknown => unreachable,
+        ObjectFormat.coff => return constructLinkerArgsCoff(ctx),
+        ObjectFormat.elf => return constructLinkerArgsElf(ctx),
+        ObjectFormat.macho => return constructLinkerArgsMachO(ctx),
+        ObjectFormat.wasm => return constructLinkerArgsWasm(ctx),
+    }
+}
+
+/// Builds the ELF (ld.lld-style) argv: output path, CRT startup objects,
+/// libc search paths and libraries, dynamic linker, compilation objects.
+/// The commented-out C++ below is the remaining porting work from the
+/// stage1 linker and is kept as a checklist.
+fn constructLinkerArgsElf(ctx: *Context) !void {
+    // TODO commented out code in this function
+    //if (g->linker_script) {
+    //    lj->args.append("-T");
+    //    lj->args.append(g->linker_script);
+    //}
+
+    //if (g->no_rosegment_workaround) {
+    //    lj->args.append("--no-rosegment");
+    //}
+    try ctx.args.append(c"--gc-sections");
+
+    //lj->args.append("-m");
+    //lj->args.append(getLDMOption(&g->zig_target));
+
+    //bool is_lib = g->out_type == OutTypeLib;
+    //bool shared = !g->is_static && is_lib;
+    //Buf *soname = nullptr;
+    if (ctx.comp.is_static) {
+        if (ctx.comp.target.isArmOrThumb()) {
+            try ctx.args.append(c"-Bstatic");
+        } else {
+            try ctx.args.append(c"-static");
+        }
+    }
+    //} else if (shared) {
+    //    lj->args.append("-shared");
+
+    //    if (buf_len(&lj->out_file) == 0) {
+    //        buf_appendf(&lj->out_file, "lib%s.so.%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".%" ZIG_PRI_usize "",
+    //            buf_ptr(g->root_out_name), g->version_major, g->version_minor, g->version_patch);
+    //    }
+    //    soname = buf_sprintf("lib%s.so.%" ZIG_PRI_usize "", buf_ptr(g->root_out_name), g->version_major);
+    //}
+
+    try ctx.args.append(c"-o");
+    try ctx.args.append(ctx.out_file_path.ptr());
+
+    // CRT startup objects come first; static executables use the T/static variants.
+    if (ctx.link_in_crt) {
+        const crt1o = if (ctx.comp.is_static) "crt1.o" else "Scrt1.o";
+        const crtbegino = if (ctx.comp.is_static) "crtbeginT.o" else "crtbegin.o";
+        try addPathJoin(ctx, ctx.libc.lib_dir.?, crt1o);
+        try addPathJoin(ctx, ctx.libc.lib_dir.?, "crti.o");
+        try addPathJoin(ctx, ctx.libc.static_lib_dir.?, crtbegino);
+    }
+
+    //for (size_t i = 0; i < g->rpath_list.length; i += 1) {
+    //    Buf *rpath = g->rpath_list.at(i);
+    //    add_rpath(lj, rpath);
+    //}
+    //if (g->each_lib_rpath) {
+    //    for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+    //        const char *lib_dir = g->lib_dirs.at(i);
+    //        for (size_t i = 0; i < g->link_libs_list.length; i += 1) {
+    //            LinkLib *link_lib = g->link_libs_list.at(i);
+    //            if (buf_eql_str(link_lib->name, "c")) {
+    //                continue;
+    //            }
+    //            bool does_exist;
+    //            Buf *test_path = buf_sprintf("%s/lib%s.so", lib_dir, buf_ptr(link_lib->name));
+    //            if (os_file_exists(test_path, &does_exist) != ErrorNone) {
+    //                zig_panic("link: unable to check if file exists: %s", buf_ptr(test_path));
+    //            }
+    //            if (does_exist) {
+    //                add_rpath(lj, buf_create_from_str(lib_dir));
+    //                break;
+    //            }
+    //        }
+    //    }
+    //}
+
+    //for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+    //    const char *lib_dir = g->lib_dirs.at(i);
+    //    lj->args.append("-L");
+    //    lj->args.append(lib_dir);
+    //}
+
+    // libc search paths and (for dynamic executables) the dynamic linker.
+    if (ctx.comp.haveLibC()) {
+        try ctx.args.append(c"-L");
+        try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, ctx.libc.lib_dir.?)).ptr);
+
+        try ctx.args.append(c"-L");
+        try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, ctx.libc.static_lib_dir.?)).ptr);
+
+        if (!ctx.comp.is_static) {
+            const dl = blk: {
+                if (ctx.libc.dynamic_linker_path) |dl| break :blk dl;
+                if (ctx.comp.target.getDynamicLinkerPath()) |dl| break :blk dl;
+                return error.LibCMissingDynamicLinker;
+            };
+            try ctx.args.append(c"-dynamic-linker");
+            try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, dl)).ptr);
+        }
+    }
+
+    //if (shared) {
+    //    lj->args.append("-soname");
+    //    lj->args.append(buf_ptr(soname));
+    //}
+
+    // .o files
+    for (ctx.comp.link_objects) |link_object| {
+        const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+        try ctx.args.append(link_obj_with_null.ptr);
+    }
+    try addFnObjects(ctx);
+
+    //if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) {
+    //    if (g->libc_link_lib == nullptr) {
+    //        Buf *builtin_o_path = build_o(g, "builtin");
+    //        lj->args.append(buf_ptr(builtin_o_path));
+    //    }
+
+    //    // sometimes libgcc is missing stuff, so we still build compiler_rt and rely on weak linkage
+    //    Buf *compiler_rt_o_path = build_compiler_rt(g);
+    //    lj->args.append(buf_ptr(compiler_rt_o_path));
+    //}
+
+    //for (size_t i = 0; i < g->link_libs_list.length; i += 1) {
+    //    LinkLib *link_lib = g->link_libs_list.at(i);
+    //    if (buf_eql_str(link_lib->name, "c")) {
+    //        continue;
+    //    }
+    //    Buf *arg;
+    //    if (buf_starts_with_str(link_lib->name, "/") || buf_ends_with_str(link_lib->name, ".a") ||
+    //        buf_ends_with_str(link_lib->name, ".so"))
+    //    {
+    //        arg = link_lib->name;
+    //    } else {
+    //        arg = buf_sprintf("-l%s", buf_ptr(link_lib->name));
+    //    }
+    //    lj->args.append(buf_ptr(arg));
+    //}
+
+    // libc dep
+    if (ctx.comp.haveLibC()) {
+        if (ctx.comp.is_static) {
+            try ctx.args.append(c"--start-group");
+            try ctx.args.append(c"-lgcc");
+            try ctx.args.append(c"-lgcc_eh");
+            try ctx.args.append(c"-lc");
+            try ctx.args.append(c"-lm");
+            try ctx.args.append(c"--end-group");
+        } else {
+            try ctx.args.append(c"-lgcc");
+            try ctx.args.append(c"--as-needed");
+            try ctx.args.append(c"-lgcc_s");
+            try ctx.args.append(c"--no-as-needed");
+            try ctx.args.append(c"-lc");
+            try ctx.args.append(c"-lm");
+            try ctx.args.append(c"-lgcc");
+            try ctx.args.append(c"--as-needed");
+            try ctx.args.append(c"-lgcc_s");
+            try ctx.args.append(c"--no-as-needed");
+        }
+    }
+
+    // crt end
+    if (ctx.link_in_crt) {
+        try addPathJoin(ctx, ctx.libc.static_lib_dir.?, "crtend.o");
+        try addPathJoin(ctx, ctx.libc.lib_dir.?, "crtn.o");
+    }
+
+    if (ctx.comp.target != Target.Native) {
+        try ctx.args.append(c"--allow-shlib-undefined");
+    }
+
+    if (ctx.comp.target.getOs() == builtin.Os.zen) {
+        try ctx.args.append(c"-e");
+        try ctx.args.append(c"_start");
+
+        try ctx.args.append(c"--image-base=0x10000000");
+    }
+}
+
+/// Joins dirname/basename, null-terminates it in the context arena, and
+/// appends the result to the linker argv.
+fn addPathJoin(ctx: *Context, dirname: []const u8, basename: []const u8) !void {
+    const full_path = try std.os.path.join(&ctx.arena.allocator, dirname, basename);
+    const full_path_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, full_path);
+    try ctx.args.append(full_path_with_null.ptr);
+}
+
+/// Builds the COFF (lld-link-style) argv: machine/subsystem selection, MSVC
+/// and SDK library paths, CRT libraries (static vs dynamic, debug vs release),
+/// and the compilation's objects. The commented-out C++ is remaining porting
+/// work from the stage1 linker.
+fn constructLinkerArgsCoff(ctx: *Context) !void {
+    try ctx.args.append(c"-NOLOGO");
+
+    if (!ctx.comp.strip) {
+        try ctx.args.append(c"-DEBUG");
+    }
+
+    switch (ctx.comp.target.getArch()) {
+        builtin.Arch.i386 => try ctx.args.append(c"-MACHINE:X86"),
+        builtin.Arch.x86_64 => try ctx.args.append(c"-MACHINE:X64"),
+        builtin.Arch.aarch64 => try ctx.args.append(c"-MACHINE:ARM"),
+        else => return error.UnsupportedLinkArchitecture,
+    }
+
+    if (ctx.comp.windows_subsystem_windows) {
+        try ctx.args.append(c"/SUBSYSTEM:windows");
+    } else if (ctx.comp.windows_subsystem_console) {
+        try ctx.args.append(c"/SUBSYSTEM:console");
+    }
+
+    const is_library = ctx.comp.kind == Compilation.Kind.Lib;
+
+    // lld-link flags carry their value inline; the \x00 terminator makes the
+    // arena-allocated string usable as a C string via .ptr.
+    const out_arg = try std.fmt.allocPrint(&ctx.arena.allocator, "-OUT:{}\x00", ctx.out_file_path.toSliceConst());
+    try ctx.args.append(out_arg.ptr);
+
+    if (ctx.comp.haveLibC()) {
+        try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.msvc_lib_dir.?)).ptr);
+        try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.kernel32_lib_dir.?)).ptr);
+        try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.lib_dir.?)).ptr);
+    }
+
+    if (ctx.link_in_crt) {
+        // "lib" prefix selects the static CRT variants; "d" suffix the debug ones.
+        const lib_str = if (ctx.comp.is_static) "lib" else "";
+        const d_str = if (ctx.comp.build_mode == builtin.Mode.Debug) "d" else "";
+
+        if (ctx.comp.is_static) {
+            const cmt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "libcmt{}.lib\x00", d_str);
+            try ctx.args.append(cmt_lib_name.ptr);
+        } else {
+            const msvcrt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "msvcrt{}.lib\x00", d_str);
+            try ctx.args.append(msvcrt_lib_name.ptr);
+        }
+
+        const vcruntime_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "{}vcruntime{}.lib\x00", lib_str, d_str);
+        try ctx.args.append(vcruntime_lib_name.ptr);
+
+        const crt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "{}ucrt{}.lib\x00", lib_str, d_str);
+        try ctx.args.append(crt_lib_name.ptr);
+
+        // Visual C++ 2015 Conformance Changes
+        // https://msdn.microsoft.com/en-us/library/bb531344.aspx
+        try ctx.args.append(c"legacy_stdio_definitions.lib");
+
+        // msvcrt depends on kernel32
+        try ctx.args.append(c"kernel32.lib");
+    } else {
+        try ctx.args.append(c"-NODEFAULTLIB");
+        if (!is_library) {
+            try ctx.args.append(c"-ENTRY:WinMainCRTStartup");
+            // TODO
+            //if (g->have_winmain) {
+            //    lj->args.append("-ENTRY:WinMain");
+            //} else {
+            //    lj->args.append("-ENTRY:WinMainCRTStartup");
+            //}
+        }
+    }
+
+    if (is_library and !ctx.comp.is_static) {
+        try ctx.args.append(c"-DLL");
+    }
+
+    //for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+    //    const char *lib_dir = g->lib_dirs.at(i);
+    //    lj->args.append(buf_ptr(buf_sprintf("-LIBPATH:%s", lib_dir)));
+    //}
+
+    for (ctx.comp.link_objects) |link_object| {
+        const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+        try ctx.args.append(link_obj_with_null.ptr);
+    }
+    try addFnObjects(ctx);
+
+    switch (ctx.comp.kind) {
+        Compilation.Kind.Exe, Compilation.Kind.Lib => {
+            if (!ctx.comp.haveLibC()) {
+                @panic("TODO");
+                //Buf *builtin_o_path = build_o(g, "builtin");
+                //lj->args.append(buf_ptr(builtin_o_path));
+            }
+
+            // msvc compiler_rt is missing some stuff, so we still build it and rely on weak linkage
+            // TODO
+            //Buf *compiler_rt_o_path = build_compiler_rt(g);
+            //lj->args.append(buf_ptr(compiler_rt_o_path));
+        },
+        Compilation.Kind.Obj => {},
+    }
+
+    //Buf *def_contents = buf_alloc();
+    //ZigList<const char *> gen_lib_args = {0};
+    //for (size_t lib_i = 0; lib_i < g->link_libs_list.length; lib_i += 1) {
+    //    LinkLib *link_lib = g->link_libs_list.at(lib_i);
+    //    if (buf_eql_str(link_lib->name, "c")) {
+    //        continue;
+    //    }
+    //    if (link_lib->provided_explicitly) {
+    //        if (lj->codegen->zig_target.env_type == ZigLLVM_GNU) {
+    //            Buf *arg = buf_sprintf("-l%s", buf_ptr(link_lib->name));
+    //            lj->args.append(buf_ptr(arg));
+    //        }
+    //        else {
+    //            lj->args.append(buf_ptr(link_lib->name));
+    //        }
+    //    } else {
+    //        buf_resize(def_contents, 0);
+    //        buf_appendf(def_contents, "LIBRARY %s\nEXPORTS\n", buf_ptr(link_lib->name));
+    //        for (size_t exp_i = 0; exp_i < link_lib->symbols.length; exp_i += 1) {
+    //            Buf *symbol_name = link_lib->symbols.at(exp_i);
+    //            buf_appendf(def_contents, "%s\n", buf_ptr(symbol_name));
+    //        }
+    //        buf_appendf(def_contents, "\n");
+
+    //        Buf *def_path = buf_alloc();
+    //        os_path_join(g->cache_dir, buf_sprintf("%s.def", buf_ptr(link_lib->name)), def_path);
+    //        os_write_file(def_path, def_contents);
+
+    //        Buf *generated_lib_path = buf_alloc();
+    //        os_path_join(g->cache_dir, buf_sprintf("%s.lib", buf_ptr(link_lib->name)), generated_lib_path);
+
+    //        gen_lib_args.resize(0);
+    //        gen_lib_args.append("link");
+
+    //        coff_append_machine_arg(g, &gen_lib_args);
+    //        gen_lib_args.append(buf_ptr(buf_sprintf("-DEF:%s", buf_ptr(def_path))));
+    //        gen_lib_args.append(buf_ptr(buf_sprintf("-OUT:%s", buf_ptr(generated_lib_path))));
+    //        Buf diag = BUF_INIT;
+    //        if (!zig_lld_link(g->zig_target.oformat, gen_lib_args.items, gen_lib_args.length, &diag)) {
+    //            fprintf(stderr, "%s\n", buf_ptr(&diag));
+    //            exit(1);
+    //        }
+    //        lj->args.append(buf_ptr(generated_lib_path));
+    //    }
+    //}
+}
+
+fn constructLinkerArgsMachO(ctx: *Context) !void {
+ try ctx.args.append(c"-demangle");
+
+ if (ctx.comp.linker_rdynamic) {
+ try ctx.args.append(c"-export_dynamic");
+ }
+
+ const is_lib = ctx.comp.kind == Compilation.Kind.Lib;
+ const shared = !ctx.comp.is_static and is_lib;
+ if (ctx.comp.is_static) {
+ try ctx.args.append(c"-static");
+ } else {
+ try ctx.args.append(c"-dynamic");
+ }
+
+ //if (is_lib) {
+ // if (!g->is_static) {
+ // lj->args.append("-dylib");
+
+ // Buf *compat_vers = buf_sprintf("%" ZIG_PRI_usize ".0.0", g->version_major);
+ // lj->args.append("-compatibility_version");
+ // lj->args.append(buf_ptr(compat_vers));
+
+ // Buf *cur_vers = buf_sprintf("%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".%" ZIG_PRI_usize,
+ // g->version_major, g->version_minor, g->version_patch);
+ // lj->args.append("-current_version");
+ // lj->args.append(buf_ptr(cur_vers));
+
+ // // TODO getting an error when running an executable when doing this rpath thing
+ // //Buf *dylib_install_name = buf_sprintf("@rpath/lib%s.%" ZIG_PRI_usize ".dylib",
+ // // buf_ptr(g->root_out_name), g->version_major);
+ // //lj->args.append("-install_name");
+ // //lj->args.append(buf_ptr(dylib_install_name));
+
+ // if (buf_len(&lj->out_file) == 0) {
+ // buf_appendf(&lj->out_file, "lib%s.%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".dylib",
+ // buf_ptr(g->root_out_name), g->version_major, g->version_minor, g->version_patch);
+ // }
+ // }
+ //}
+
+ try ctx.args.append(c"-arch");
+ const darwin_arch_str = try std.cstr.addNullByte(
+ &ctx.arena.allocator,
+ ctx.comp.target.getDarwinArchString(),
+ );
+ try ctx.args.append(darwin_arch_str.ptr);
+
+ const platform = try DarwinPlatform.get(ctx.comp);
+ switch (platform.kind) {
+ DarwinPlatform.Kind.MacOS => try ctx.args.append(c"-macosx_version_min"),
+ DarwinPlatform.Kind.IPhoneOS => try ctx.args.append(c"-iphoneos_version_min"),
+ DarwinPlatform.Kind.IPhoneOSSimulator => try ctx.args.append(c"-ios_simulator_version_min"),
+ }
+ const ver_str = try std.fmt.allocPrint(&ctx.arena.allocator, "{}.{}.{}\x00", platform.major, platform.minor, platform.micro);
+ try ctx.args.append(ver_str.ptr);
+
+ if (ctx.comp.kind == Compilation.Kind.Exe) {
+ if (ctx.comp.is_static) {
+ try ctx.args.append(c"-no_pie");
+ } else {
+ try ctx.args.append(c"-pie");
+ }
+ }
+
+ try ctx.args.append(c"-o");
+ try ctx.args.append(ctx.out_file_path.ptr());
+
+ //for (size_t i = 0; i < g->rpath_list.length; i += 1) {
+ // Buf *rpath = g->rpath_list.at(i);
+ // add_rpath(lj, rpath);
+ //}
+ //add_rpath(lj, &lj->out_file);
+
+ if (shared) {
+ try ctx.args.append(c"-headerpad_max_install_names");
+ } else if (ctx.comp.is_static) {
+ try ctx.args.append(c"-lcrt0.o");
+ } else {
+ switch (platform.kind) {
+ DarwinPlatform.Kind.MacOS => {
+ if (platform.versionLessThan(10, 5)) {
+ try ctx.args.append(c"-lcrt1.o");
+ } else if (platform.versionLessThan(10, 6)) {
+ try ctx.args.append(c"-lcrt1.10.5.o");
+ } else if (platform.versionLessThan(10, 8)) {
+ try ctx.args.append(c"-lcrt1.10.6.o");
+ }
+ },
+ DarwinPlatform.Kind.IPhoneOS => {
+ if (ctx.comp.target.getArch() == builtin.Arch.aarch64) {
+ // iOS does not need any crt1 files for arm64
+ } else if (platform.versionLessThan(3, 1)) {
+ try ctx.args.append(c"-lcrt1.o");
+ } else if (platform.versionLessThan(6, 0)) {
+ try ctx.args.append(c"-lcrt1.3.1.o");
+ }
+ },
+ DarwinPlatform.Kind.IPhoneOSSimulator => {}, // no crt1.o needed
+ }
+ }
+
+ //for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+ // const char *lib_dir = g->lib_dirs.at(i);
+ // lj->args.append("-L");
+ // lj->args.append(lib_dir);
+ //}
+
+ for (ctx.comp.link_objects) |link_object| {
+ const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+ try ctx.args.append(link_obj_with_null.ptr);
+ }
+ try addFnObjects(ctx);
+
+ //// compiler_rt on darwin is missing some stuff, so we still build it and rely on LinkOnce
+ //if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) {
+ // Buf *compiler_rt_o_path = build_compiler_rt(g);
+ // lj->args.append(buf_ptr(compiler_rt_o_path));
+ //}
+
+ if (ctx.comp.target == Target.Native) {
+ for (ctx.comp.link_libs_list.toSliceConst()) |lib| {
+ if (mem.eql(u8, lib.name, "c")) {
+ // on Darwin, libSystem has libc in it, but also you have to use it
+ // to make syscalls because the syscall numbers are not documented
+ // and change between versions.
+ // so we always link against libSystem
+ try ctx.args.append(c"-lSystem");
+ } else {
+ if (mem.indexOfScalar(u8, lib.name, '/') == null) {
+ const arg = try std.fmt.allocPrint(&ctx.arena.allocator, "-l{}\x00", lib.name);
+ try ctx.args.append(arg.ptr);
+ } else {
+ const arg = try std.cstr.addNullByte(&ctx.arena.allocator, lib.name);
+ try ctx.args.append(arg.ptr);
+ }
+ }
+ }
+ } else {
+ try ctx.args.append(c"-undefined");
+ try ctx.args.append(c"dynamic_lookup");
+ }
+
+ if (platform.kind == DarwinPlatform.Kind.MacOS) {
+ if (platform.versionLessThan(10, 5)) {
+ try ctx.args.append(c"-lgcc_s.10.4");
+ } else if (platform.versionLessThan(10, 6)) {
+ try ctx.args.append(c"-lgcc_s.10.5");
+ }
+ } else {
+ @panic("TODO");
+ }
+
+ //for (size_t i = 0; i < g->darwin_frameworks.length; i += 1) {
+ // lj->args.append("-framework");
+ // lj->args.append(buf_ptr(g->darwin_frameworks.at(i)));
+ //}
+}
+
+fn constructLinkerArgsWasm(ctx: *Context) void {
+ @panic("TODO");
+}
+
+fn addFnObjects(ctx: *Context) !void {
+ // at this point it's guaranteed nobody else has this lock, so we circumvent it
+ // and avoid having to be a coroutine
+ const fn_link_set = &ctx.comp.fn_link_set.private_data;
+
+ var it = fn_link_set.first;
+ while (it) |node| {
+ const fn_val = node.data orelse {
+ // handle the tombstone. See Value.Fn.destroy.
+ it = node.next;
+ fn_link_set.remove(node);
+ ctx.comp.gpa().destroy(node);
+ continue;
+ };
+ try ctx.args.append(fn_val.containing_object.ptr());
+ it = node.next;
+ }
+}
+
+const DarwinPlatform = struct {
+ kind: Kind,
+ major: u32,
+ minor: u32,
+ micro: u32,
+
+ const Kind = enum {
+ MacOS,
+ IPhoneOS,
+ IPhoneOSSimulator,
+ };
+
+ fn get(comp: *Compilation) !DarwinPlatform {
+ var result: DarwinPlatform = undefined;
+ const ver_str = switch (comp.darwin_version_min) {
+ Compilation.DarwinVersionMin.MacOS => |ver| blk: {
+ result.kind = Kind.MacOS;
+ break :blk ver;
+ },
+ Compilation.DarwinVersionMin.Ios => |ver| blk: {
+ result.kind = Kind.IPhoneOS;
+ break :blk ver;
+ },
+ Compilation.DarwinVersionMin.None => blk: {
+ assert(comp.target.getOs() == builtin.Os.macosx);
+ result.kind = Kind.MacOS;
+ break :blk "10.10";
+ },
+ };
+
+ var had_extra: bool = undefined;
+ try darwinGetReleaseVersion(
+ ver_str,
+ &result.major,
+ &result.minor,
+ &result.micro,
+ &had_extra,
+ );
+ if (had_extra or result.major != 10 or result.minor >= 100 or result.micro >= 100) {
+ return error.InvalidDarwinVersionString;
+ }
+
+ if (result.kind == Kind.IPhoneOS) {
+ switch (comp.target.getArch()) {
+ builtin.Arch.i386,
+ builtin.Arch.x86_64,
+ => result.kind = Kind.IPhoneOSSimulator,
+ else => {},
+ }
+ }
+ return result;
+ }
+
+ fn versionLessThan(self: DarwinPlatform, major: u32, minor: u32) bool {
+ if (self.major < major)
+ return true;
+ if (self.major > major)
+ return false;
+ if (self.minor < minor)
+ return true;
+ return false;
+ }
+};
+
+/// Parse (([0-9]+)(.([0-9]+)(.([0-9]+))?)? and store the grouped values
+/// as integers. Numbers which are not provided are set to 0.
+/// had_extra is set to true only when a fourth dot-separated component follows
+/// (e.g. "10.3.5.9"); trailing non-digit text such as "10.3.5extrastuff"
+fn darwinGetReleaseVersion(str: []const u8, major: *u32, minor: *u32, micro: *u32, had_extra: *bool) !void {
+ major.* = 0;
+ minor.* = 0;
+ micro.* = 0;
+ had_extra.* = false;
+
+ if (str.len == 0)
+ return error.InvalidDarwinVersionString;
+
+ var start_pos: usize = 0;
+ for ([]*u32{ major, minor, micro }) |v| {
+ const dot_pos = mem.indexOfScalarPos(u8, str, start_pos, '.');
+ const end_pos = dot_pos orelse str.len;
+ v.* = std.fmt.parseUnsigned(u32, str[start_pos..end_pos], 10) catch return error.InvalidDarwinVersionString;
+ start_pos = (dot_pos orelse return) + 1;
+ if (start_pos == str.len) return;
+ }
+ had_extra.* = true;
+}
diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig
index 16c359adcf..778d3fae07 100644
--- a/src-self-hosted/llvm.zig
+++ b/src-self-hosted/llvm.zig
@@ -2,12 +2,213 @@ const builtin = @import("builtin");
const c = @import("c.zig");
const assert = @import("std").debug.assert;
-pub const ValueRef = removeNullability(c.LLVMValueRef);
-pub const ModuleRef = removeNullability(c.LLVMModuleRef);
-pub const ContextRef = removeNullability(c.LLVMContextRef);
+// we wrap the c module for 3 reasons:
+// 1. to avoid accidentally calling the non-thread-safe functions
+// 2. patch up some of the types to remove nullability
+// 3. some functions have been augmented by zig_llvm.cpp to be more powerful,
+// such as ZigLLVMTargetMachineEmitToFile
+
+pub const AttributeIndex = c_uint;
+pub const Bool = c_int;
+
pub const BuilderRef = removeNullability(c.LLVMBuilderRef);
+pub const ContextRef = removeNullability(c.LLVMContextRef);
+pub const ModuleRef = removeNullability(c.LLVMModuleRef);
+pub const ValueRef = removeNullability(c.LLVMValueRef);
+pub const TypeRef = removeNullability(c.LLVMTypeRef);
+pub const BasicBlockRef = removeNullability(c.LLVMBasicBlockRef);
+pub const AttributeRef = removeNullability(c.LLVMAttributeRef);
+pub const TargetRef = removeNullability(c.LLVMTargetRef);
+pub const TargetMachineRef = removeNullability(c.LLVMTargetMachineRef);
+pub const TargetDataRef = removeNullability(c.LLVMTargetDataRef);
+pub const DIBuilder = c.ZigLLVMDIBuilder;
+
+pub const ABIAlignmentOfType = c.LLVMABIAlignmentOfType;
+pub const AddAttributeAtIndex = c.LLVMAddAttributeAtIndex;
+pub const AddFunction = c.LLVMAddFunction;
+pub const AddGlobal = c.LLVMAddGlobal;
+pub const AddModuleCodeViewFlag = c.ZigLLVMAddModuleCodeViewFlag;
+pub const AddModuleDebugInfoFlag = c.ZigLLVMAddModuleDebugInfoFlag;
+pub const ArrayType = c.LLVMArrayType;
+pub const BuildLoad = c.LLVMBuildLoad;
+pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation;
+pub const ConstAllOnes = c.LLVMConstAllOnes;
+pub const ConstArray = c.LLVMConstArray;
+pub const ConstBitCast = c.LLVMConstBitCast;
+pub const ConstInt = c.LLVMConstInt;
+pub const ConstIntOfArbitraryPrecision = c.LLVMConstIntOfArbitraryPrecision;
+pub const ConstNeg = c.LLVMConstNeg;
+pub const ConstNull = c.LLVMConstNull;
+pub const ConstStringInContext = c.LLVMConstStringInContext;
+pub const ConstStructInContext = c.LLVMConstStructInContext;
+pub const CopyStringRepOfTargetData = c.LLVMCopyStringRepOfTargetData;
+pub const CreateBuilderInContext = c.LLVMCreateBuilderInContext;
+pub const CreateCompileUnit = c.ZigLLVMCreateCompileUnit;
+pub const CreateDIBuilder = c.ZigLLVMCreateDIBuilder;
+pub const CreateEnumAttribute = c.LLVMCreateEnumAttribute;
+pub const CreateFile = c.ZigLLVMCreateFile;
+pub const CreateStringAttribute = c.LLVMCreateStringAttribute;
+pub const CreateTargetDataLayout = c.LLVMCreateTargetDataLayout;
+pub const CreateTargetMachine = c.LLVMCreateTargetMachine;
+pub const DIBuilderFinalize = c.ZigLLVMDIBuilderFinalize;
+pub const DisposeBuilder = c.LLVMDisposeBuilder;
+pub const DisposeDIBuilder = c.ZigLLVMDisposeDIBuilder;
+pub const DisposeMessage = c.LLVMDisposeMessage;
+pub const DisposeModule = c.LLVMDisposeModule;
+pub const DisposeTargetData = c.LLVMDisposeTargetData;
+pub const DisposeTargetMachine = c.LLVMDisposeTargetMachine;
+pub const DoubleTypeInContext = c.LLVMDoubleTypeInContext;
+pub const DumpModule = c.LLVMDumpModule;
+pub const FP128TypeInContext = c.LLVMFP128TypeInContext;
+pub const FloatTypeInContext = c.LLVMFloatTypeInContext;
+pub const GetEnumAttributeKindForName = c.LLVMGetEnumAttributeKindForName;
+pub const GetHostCPUName = c.ZigLLVMGetHostCPUName;
+pub const GetMDKindIDInContext = c.LLVMGetMDKindIDInContext;
+pub const GetNativeFeatures = c.ZigLLVMGetNativeFeatures;
+pub const GetUndef = c.LLVMGetUndef;
+pub const HalfTypeInContext = c.LLVMHalfTypeInContext;
+pub const InitializeAllAsmParsers = c.LLVMInitializeAllAsmParsers;
+pub const InitializeAllAsmPrinters = c.LLVMInitializeAllAsmPrinters;
+pub const InitializeAllTargetInfos = c.LLVMInitializeAllTargetInfos;
+pub const InitializeAllTargetMCs = c.LLVMInitializeAllTargetMCs;
+pub const InitializeAllTargets = c.LLVMInitializeAllTargets;
+pub const InsertBasicBlockInContext = c.LLVMInsertBasicBlockInContext;
+pub const Int128TypeInContext = c.LLVMInt128TypeInContext;
+pub const Int16TypeInContext = c.LLVMInt16TypeInContext;
+pub const Int1TypeInContext = c.LLVMInt1TypeInContext;
+pub const Int32TypeInContext = c.LLVMInt32TypeInContext;
+pub const Int64TypeInContext = c.LLVMInt64TypeInContext;
+pub const Int8TypeInContext = c.LLVMInt8TypeInContext;
+pub const IntPtrTypeForASInContext = c.LLVMIntPtrTypeForASInContext;
+pub const IntPtrTypeInContext = c.LLVMIntPtrTypeInContext;
+pub const IntTypeInContext = c.LLVMIntTypeInContext;
+pub const LabelTypeInContext = c.LLVMLabelTypeInContext;
+pub const MDNodeInContext = c.LLVMMDNodeInContext;
+pub const MDStringInContext = c.LLVMMDStringInContext;
+pub const MetadataTypeInContext = c.LLVMMetadataTypeInContext;
+pub const ModuleCreateWithNameInContext = c.LLVMModuleCreateWithNameInContext;
+pub const PPCFP128TypeInContext = c.LLVMPPCFP128TypeInContext;
+pub const PointerType = c.LLVMPointerType;
+pub const SetAlignment = c.LLVMSetAlignment;
+pub const SetDataLayout = c.LLVMSetDataLayout;
+pub const SetGlobalConstant = c.LLVMSetGlobalConstant;
+pub const SetInitializer = c.LLVMSetInitializer;
+pub const SetLinkage = c.LLVMSetLinkage;
+pub const SetTarget = c.LLVMSetTarget;
+pub const SetUnnamedAddr = c.LLVMSetUnnamedAddr;
+pub const SetVolatile = c.LLVMSetVolatile;
+pub const StructTypeInContext = c.LLVMStructTypeInContext;
+pub const TokenTypeInContext = c.LLVMTokenTypeInContext;
+pub const VoidTypeInContext = c.LLVMVoidTypeInContext;
+pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext;
+pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext;
+
+pub const GetElementType = LLVMGetElementType;
+extern fn LLVMGetElementType(Ty: TypeRef) TypeRef;
+
+pub const TypeOf = LLVMTypeOf;
+extern fn LLVMTypeOf(Val: ValueRef) TypeRef;
+
+pub const BuildStore = LLVMBuildStore;
+extern fn LLVMBuildStore(arg0: BuilderRef, Val: ValueRef, Ptr: ValueRef) ?ValueRef;
+
+pub const BuildAlloca = LLVMBuildAlloca;
+extern fn LLVMBuildAlloca(arg0: BuilderRef, Ty: TypeRef, Name: ?[*]const u8) ?ValueRef;
+
+pub const ConstInBoundsGEP = LLVMConstInBoundsGEP;
+pub extern fn LLVMConstInBoundsGEP(ConstantVal: ValueRef, ConstantIndices: [*]ValueRef, NumIndices: c_uint) ?ValueRef;
+
+pub const GetTargetFromTriple = LLVMGetTargetFromTriple;
+extern fn LLVMGetTargetFromTriple(Triple: [*]const u8, T: *TargetRef, ErrorMessage: ?*[*]u8) Bool;
+
+pub const VerifyModule = LLVMVerifyModule;
+extern fn LLVMVerifyModule(M: ModuleRef, Action: VerifierFailureAction, OutMessage: *?[*]u8) Bool;
+
+pub const GetInsertBlock = LLVMGetInsertBlock;
+extern fn LLVMGetInsertBlock(Builder: BuilderRef) BasicBlockRef;
+
+pub const FunctionType = LLVMFunctionType;
+extern fn LLVMFunctionType(
+ ReturnType: TypeRef,
+ ParamTypes: [*]TypeRef,
+ ParamCount: c_uint,
+ IsVarArg: Bool,
+) ?TypeRef;
+
+pub const GetParam = LLVMGetParam;
+extern fn LLVMGetParam(Fn: ValueRef, Index: c_uint) ValueRef;
+
+pub const AppendBasicBlockInContext = LLVMAppendBasicBlockInContext;
+extern fn LLVMAppendBasicBlockInContext(C: ContextRef, Fn: ValueRef, Name: [*]const u8) ?BasicBlockRef;
+
+pub const PositionBuilderAtEnd = LLVMPositionBuilderAtEnd;
+extern fn LLVMPositionBuilderAtEnd(Builder: BuilderRef, Block: BasicBlockRef) void;
+
+pub const AbortProcessAction = VerifierFailureAction.LLVMAbortProcessAction;
+pub const PrintMessageAction = VerifierFailureAction.LLVMPrintMessageAction;
+pub const ReturnStatusAction = VerifierFailureAction.LLVMReturnStatusAction;
+pub const VerifierFailureAction = c.LLVMVerifierFailureAction;
+
+pub const CodeGenLevelNone = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelNone;
+pub const CodeGenLevelLess = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelLess;
+pub const CodeGenLevelDefault = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelDefault;
+pub const CodeGenLevelAggressive = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelAggressive;
+pub const CodeGenOptLevel = c.LLVMCodeGenOptLevel;
+
+pub const RelocDefault = c.LLVMRelocMode.LLVMRelocDefault;
+pub const RelocStatic = c.LLVMRelocMode.LLVMRelocStatic;
+pub const RelocPIC = c.LLVMRelocMode.LLVMRelocPIC;
+pub const RelocDynamicNoPic = c.LLVMRelocMode.LLVMRelocDynamicNoPic;
+pub const RelocMode = c.LLVMRelocMode;
+
+pub const CodeModelDefault = c.LLVMCodeModel.LLVMCodeModelDefault;
+pub const CodeModelJITDefault = c.LLVMCodeModel.LLVMCodeModelJITDefault;
+pub const CodeModelSmall = c.LLVMCodeModel.LLVMCodeModelSmall;
+pub const CodeModelKernel = c.LLVMCodeModel.LLVMCodeModelKernel;
+pub const CodeModelMedium = c.LLVMCodeModel.LLVMCodeModelMedium;
+pub const CodeModelLarge = c.LLVMCodeModel.LLVMCodeModelLarge;
+pub const CodeModel = c.LLVMCodeModel;
+
+pub const EmitAssembly = EmitOutputType.ZigLLVM_EmitAssembly;
+pub const EmitBinary = EmitOutputType.ZigLLVM_EmitBinary;
+pub const EmitLLVMIr = EmitOutputType.ZigLLVM_EmitLLVMIr;
+pub const EmitOutputType = c.ZigLLVM_EmitOutputType;
+
+pub const CCallConv = c.LLVMCCallConv;
+pub const FastCallConv = c.LLVMFastCallConv;
+pub const ColdCallConv = c.LLVMColdCallConv;
+pub const WebKitJSCallConv = c.LLVMWebKitJSCallConv;
+pub const AnyRegCallConv = c.LLVMAnyRegCallConv;
+pub const X86StdcallCallConv = c.LLVMX86StdcallCallConv;
+pub const X86FastcallCallConv = c.LLVMX86FastcallCallConv;
+pub const CallConv = c.LLVMCallConv;
+
+pub const FnInline = extern enum {
+ Auto,
+ Always,
+ Never,
+};
fn removeNullability(comptime T: type) type {
- comptime assert(@typeId(T) == builtin.TypeId.Nullable);
+ comptime assert(@typeId(T) == builtin.TypeId.Optional);
return T.Child;
}
+
+pub const BuildRet = LLVMBuildRet;
+extern fn LLVMBuildRet(arg0: BuilderRef, V: ?ValueRef) ?ValueRef;
+
+pub const TargetMachineEmitToFile = ZigLLVMTargetMachineEmitToFile;
+extern fn ZigLLVMTargetMachineEmitToFile(
+ targ_machine_ref: TargetMachineRef,
+ module_ref: ModuleRef,
+ filename: [*]const u8,
+ output_type: EmitOutputType,
+ error_message: *[*]u8,
+ is_debug: bool,
+ is_small: bool,
+) bool;
+
+pub const BuildCall = ZigLLVMBuildCall;
+extern fn ZigLLVMBuildCall(B: BuilderRef, Fn: ValueRef, Args: [*]ValueRef, NumArgs: c_uint, CC: c_uint, fn_inline: FnInline, Name: [*]const u8) ?ValueRef;
+
+pub const PrivateLinkage = c.LLVMLinkage.LLVMPrivateLinkage;
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index c1a6bbe99a..37bb435c1b 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
+const event = std.event;
const os = std.os;
const io = std.io;
const mem = std.mem;
@@ -13,247 +14,118 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig");
const Args = arg.Args;
const Flag = arg.Flag;
-const Module = @import("module.zig").Module;
+const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
+const Compilation = @import("compilation.zig").Compilation;
const Target = @import("target.zig").Target;
+const errmsg = @import("errmsg.zig");
+const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
-var stderr: &io.OutStream(io.FileOutStream.Error) = undefined;
-var stdout: &io.OutStream(io.FileOutStream.Error) = undefined;
+var stderr_file: os.File = undefined;
+var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
+var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
const usage =
\\usage: zig [command] [options]
\\
\\Commands:
\\
- \\ build Build project from build.zig
- \\ build-exe [source] Create executable from source or object files
- \\ build-lib [source] Create library from source or object files
- \\ build-obj [source] Create object from source or assembly
- \\ fmt [source] Parse file and render in canonical zig format
- \\ run [source] Create executable and run immediately
- \\ targets List available compilation targets
- \\ test [source] Create and run a test build
- \\ translate-c [source] Convert c code to zig code
- \\ version Print version number and exit
- \\ zen Print zen of zig and exit
+ \\ build-exe [source] Create executable from source or object files
+ \\ build-lib [source] Create library from source or object files
+ \\ build-obj [source] Create object from source or assembly
+ \\ fmt [source] Parse file and render in canonical zig format
+ \\ libc [paths_file] Display native libc paths file or validate one
+ \\ targets List available compilation targets
+ \\ version Print version number and exit
+ \\ zen Print zen of zig and exit
\\
\\
- ;
+;
const Command = struct {
name: []const u8,
- exec: fn(&Allocator, []const []const u8) error!void,
+ exec: fn (*Allocator, []const []const u8) error!void,
};
pub fn main() !void {
- var allocator = std.heap.c_allocator;
+ // This allocator needs to be thread-safe because we use it for the event.Loop
+ // which multiplexes coroutines onto kernel threads.
+ // libc allocator is guaranteed to have this property.
+ const allocator = std.heap.c_allocator;
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
stdout = &stdout_out_stream.stream;
- var stderr_file = try std.io.getStdErr();
+ stderr_file = try std.io.getStdErr();
var stderr_out_stream = std.io.FileOutStream.init(&stderr_file);
stderr = &stderr_out_stream.stream;
const args = try os.argsAlloc(allocator);
- defer os.argsFree(allocator, args);
+ // TODO I'm getting unreachable code here, which shouldn't happen
+ //defer os.argsFree(allocator, args);
if (args.len <= 1) {
+ try stderr.write("expected command argument\n\n");
try stderr.write(usage);
os.exit(1);
}
- const commands = []Command {
- Command { .name = "build", .exec = cmdBuild },
- Command { .name = "build-exe", .exec = cmdBuildExe },
- Command { .name = "build-lib", .exec = cmdBuildLib },
- Command { .name = "build-obj", .exec = cmdBuildObj },
- Command { .name = "fmt", .exec = cmdFmt },
- Command { .name = "run", .exec = cmdRun },
- Command { .name = "targets", .exec = cmdTargets },
- Command { .name = "test", .exec = cmdTest },
- Command { .name = "translate-c", .exec = cmdTranslateC },
- Command { .name = "version", .exec = cmdVersion },
- Command { .name = "zen", .exec = cmdZen },
+ const commands = []Command{
+ Command{
+ .name = "build-exe",
+ .exec = cmdBuildExe,
+ },
+ Command{
+ .name = "build-lib",
+ .exec = cmdBuildLib,
+ },
+ Command{
+ .name = "build-obj",
+ .exec = cmdBuildObj,
+ },
+ Command{
+ .name = "fmt",
+ .exec = cmdFmt,
+ },
+ Command{
+ .name = "libc",
+ .exec = cmdLibC,
+ },
+ Command{
+ .name = "targets",
+ .exec = cmdTargets,
+ },
+ Command{
+ .name = "version",
+ .exec = cmdVersion,
+ },
+ Command{
+ .name = "zen",
+ .exec = cmdZen,
+ },
// undocumented commands
- Command { .name = "help", .exec = cmdHelp },
- Command { .name = "internal", .exec = cmdInternal },
+ Command{
+ .name = "help",
+ .exec = cmdHelp,
+ },
+ Command{
+ .name = "internal",
+ .exec = cmdInternal,
+ },
};
for (commands) |command| {
if (mem.eql(u8, command.name, args[1])) {
- try command.exec(allocator, args[2..]);
- return;
+ return command.exec(allocator, args[2..]);
}
}
try stderr.print("unknown command: {}\n\n", args[1]);
try stderr.write(usage);
+ os.exit(1);
}
-// cmd:build ///////////////////////////////////////////////////////////////////////////////////////
-
-const usage_build =
- \\usage: zig build <options>
- \\
- \\General Options:
- \\ --help Print this help and exit
- \\ --init Generate a build.zig template
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose Print commands before executing them
- \\ --prefix [path] Override default install prefix
- \\
- \\Project-Specific Options:
- \\
- \\ Project-specific options become available when the build file is found.
- \\
- \\Advanced Options:
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose-tokenize Enable compiler debug output for tokenization
- \\ --verbose-ast Enable compiler debug output for parsing into an AST
- \\ --verbose-link Enable compiler debug output for linking
- \\ --verbose-ir Enable compiler debug output for Zig IR
- \\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
- \\ --verbose-cimport Enable compiler debug output for C imports
- \\
- \\
- ;
-
-const args_build_spec = []Flag {
- Flag.Bool("--help"),
- Flag.Bool("--init"),
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose"),
- Flag.Arg1("--prefix"),
-
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose-tokenize"),
- Flag.Bool("--verbose-ast"),
- Flag.Bool("--verbose-link"),
- Flag.Bool("--verbose-ir"),
- Flag.Bool("--verbose-llvm-ir"),
- Flag.Bool("--verbose-cimport"),
-};
-
-const missing_build_file =
- \\No 'build.zig' file found.
- \\
- \\Initialize a 'build.zig' template file with `zig build --init`,
- \\or build an executable directly with `zig build-exe $FILENAME.zig`.
- \\
- \\See: `zig build --help` or `zig help` for more options.
- \\
- ;
-
-fn cmdBuild(allocator: &Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_build);
- os.exit(0);
- }
-
- const zig_lib_dir = try introspect.resolveZigLibDir(allocator);
- defer allocator.free(zig_lib_dir);
-
- const zig_std_dir = try os.path.join(allocator, zig_lib_dir, "std");
- defer allocator.free(zig_std_dir);
-
- const special_dir = try os.path.join(allocator, zig_std_dir, "special");
- defer allocator.free(special_dir);
-
- const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
- defer allocator.free(build_runner_path);
-
- const build_file = flags.single("build-file") ?? "build.zig";
- const build_file_abs = try os.path.resolve(allocator, ".", build_file);
- defer allocator.free(build_file_abs);
-
- const build_file_exists = os.File.access(allocator, build_file_abs, os.default_file_mode) catch false;
-
- if (flags.present("init")) {
- if (build_file_exists) {
- try stderr.print("build.zig already exists\n");
- os.exit(1);
- }
-
- // need a new scope for proper defer scope finalization on exit
- {
- const build_template_path = try os.path.join(allocator, special_dir, "build_file_template.zig");
- defer allocator.free(build_template_path);
-
- try os.copyFile(allocator, build_template_path, build_file_abs);
- try stderr.print("wrote build.zig template\n");
- }
-
- os.exit(0);
- }
-
- if (!build_file_exists) {
- try stderr.write(missing_build_file);
- os.exit(1);
- }
-
- // TODO: Invoke build.zig entrypoint directly?
- var zig_exe_path = try os.selfExePath(allocator);
- defer allocator.free(zig_exe_path);
-
- var build_args = ArrayList([]const u8).init(allocator);
- defer build_args.deinit();
-
- const build_file_basename = os.path.basename(build_file_abs);
- const build_file_dirname = os.path.dirname(build_file_abs);
-
- var full_cache_dir: []u8 = undefined;
- if (flags.single("cache-dir")) |cache_dir| {
- full_cache_dir = try os.path.resolve(allocator, ".", cache_dir, full_cache_dir);
- } else {
- full_cache_dir = try os.path.join(allocator, build_file_dirname, "zig-cache");
- }
- defer allocator.free(full_cache_dir);
-
- const path_to_build_exe = try os.path.join(allocator, full_cache_dir, "build");
- defer allocator.free(path_to_build_exe);
-
- try build_args.append(path_to_build_exe);
- try build_args.append(zig_exe_path);
- try build_args.append(build_file_dirname);
- try build_args.append(full_cache_dir);
-
- var proc = try os.ChildProcess.init(build_args.toSliceConst(), allocator);
- defer proc.deinit();
-
- var term = try proc.spawnAndWait();
- switch (term) {
- os.ChildProcess.Term.Exited => |status| {
- if (status != 0) {
- try stderr.print("{} exited with status {}\n", build_args.at(0), status);
- os.exit(1);
- }
- },
- os.ChildProcess.Term.Signal => |signal| {
- try stderr.print("{} killed by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Stopped => |signal| {
- try stderr.print("{} stopped by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Unknown => |status| {
- try stderr.print("{} encountered unknown failure {}\n", build_args.at(0), status);
- os.exit(1);
- },
- }
-}
-
-// cmd:build-exe ///////////////////////////////////////////////////////////////////////////////////
-
const usage_build_generic =
\\usage: zig build-exe <options> [file]
\\ zig build-lib <options> [file]
@@ -264,18 +136,20 @@ const usage_build_generic =
\\ --color [auto|off|on] Enable or disable colored error messages
\\
\\Compile Options:
+ \\ --libc [file] Provide a file which specifies libc paths
\\ --assembly [source] Add assembly file to build
- \\ --cache-dir [path] Override the cache directory
\\ --emit [filetype] Emit a specific file format as compilation output
\\ --enable-timing-info Print timing diagnostics
- \\ --libc-include-dir [path] Directory where libc stdlib.h resides
\\ --name [name] Override output name
\\ --output [file] Override destination path
\\ --output-h [file] Override generated header file path
\\ --pkg-begin [name] [path] Make package available to import and push current pkg
\\ --pkg-end Pop current pkg
- \\ --release-fast Build with optimizations on and safety off
- \\ --release-safe Build with optimizations on and safety on
+ \\ --mode [mode] Set the build mode
+ \\ debug (default) optimizations off, safety on
+ \\ release-fast optimizations on, safety off
+ \\ release-safe optimizations on, safety on
+ \\ release-small optimize for small binary, safety off
\\ --static Output will be statically linked
\\ --strip Exclude debug symbols
\\ --target-arch [name] Specify target architecture
@@ -294,12 +168,7 @@ const usage_build_generic =
\\
\\Link Options:
\\ --ar-path [path] Set the path to ar
- \\ --dynamic-linker [path] Set the path to ld.so
\\ --each-lib-rpath Add rpath for each used dynamic library
- \\ --libc-lib-dir [path] Directory where libc crt1.o resides
- \\ --libc-static-lib-dir [path] Directory where libc crtbegin.o resides
- \\ --msvc-lib-dir [path] (windows) directory where vcruntime.lib resides
- \\ --kernel32-lib-dir [path] (windows) directory where kernel32.lib resides
\\ --library [lib] Link against lib
\\ --forbid-library [lib] Make it an error to link against lib
\\ --library-path [dir] Add a directory to the library search path
@@ -317,25 +186,36 @@ const usage_build_generic =
\\ --ver-patch [ver] Dynamic library semver patch version
\\
\\
- ;
+;
-const args_build_generic = []Flag {
+const args_build_generic = []Flag{
Flag.Bool("--help"),
- Flag.Option("--color", []const []const u8 { "auto", "off", "on" }),
+ Flag.Option("--color", []const []const u8{
+ "auto",
+ "off",
+ "on",
+ }),
+ Flag.Option("--mode", []const []const u8{
+ "debug",
+ "release-fast",
+ "release-safe",
+ "release-small",
+ }),
Flag.ArgMergeN("--assembly", 1),
- Flag.Arg1("--cache-dir"),
- Flag.Option("--emit", []const []const u8 { "asm", "bin", "llvm-ir" }),
+ Flag.Option("--emit", []const []const u8{
+ "asm",
+ "bin",
+ "llvm-ir",
+ }),
Flag.Bool("--enable-timing-info"),
- Flag.Arg1("--libc-include-dir"),
+ Flag.Arg1("--libc"),
Flag.Arg1("--name"),
Flag.Arg1("--output"),
Flag.Arg1("--output-h"),
// NOTE: Parsed manually after initial check
Flag.ArgN("--pkg-begin", 2),
Flag.Bool("--pkg-end"),
- Flag.Bool("--release-fast"),
- Flag.Bool("--release-safe"),
Flag.Bool("--static"),
Flag.Bool("--strip"),
Flag.Arg1("--target-arch"),
@@ -353,12 +233,7 @@ const args_build_generic = []Flag {
Flag.Arg1("-mllvm"),
Flag.Arg1("--ar-path"),
- Flag.Arg1("--dynamic-linker"),
Flag.Bool("--each-lib-rpath"),
- Flag.Arg1("--libc-lib-dir"),
- Flag.Arg1("--libc-static-lib-dir"),
- Flag.Arg1("--msvc-lib-dir"),
- Flag.Arg1("--kernel32-lib-dir"),
Flag.ArgMergeN("--library", 1),
Flag.ArgMergeN("--forbid-library", 1),
Flag.ArgMergeN("--library-path", 1),
@@ -377,49 +252,60 @@ const args_build_generic = []Flag {
Flag.Arg1("--ver-patch"),
};
-fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Module.Kind) !void {
+fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Compilation.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_build_generic);
+ try stdout.write(usage_build_generic);
os.exit(0);
}
- var build_mode = builtin.Mode.Debug;
- if (flags.present("release-fast")) {
- build_mode = builtin.Mode.ReleaseFast;
- } else if (flags.present("release-safe")) {
- build_mode = builtin.Mode.ReleaseSafe;
- }
+ const build_mode = blk: {
+ if (flags.single("mode")) |mode_flag| {
+ if (mem.eql(u8, mode_flag, "debug")) {
+ break :blk builtin.Mode.Debug;
+ } else if (mem.eql(u8, mode_flag, "release-fast")) {
+ break :blk builtin.Mode.ReleaseFast;
+ } else if (mem.eql(u8, mode_flag, "release-safe")) {
+ break :blk builtin.Mode.ReleaseSafe;
+ } else if (mem.eql(u8, mode_flag, "release-small")) {
+ break :blk builtin.Mode.ReleaseSmall;
+ } else unreachable;
+ } else {
+ break :blk builtin.Mode.Debug;
+ }
+ };
- var color = Module.ErrColor.Auto;
- if (flags.single("color")) |color_flag| {
- if (mem.eql(u8, color_flag, "auto")) {
- color = Module.ErrColor.Auto;
- } else if (mem.eql(u8, color_flag, "on")) {
- color = Module.ErrColor.On;
- } else if (mem.eql(u8, color_flag, "off")) {
- color = Module.ErrColor.Off;
+ const color = blk: {
+ if (flags.single("color")) |color_flag| {
+ if (mem.eql(u8, color_flag, "auto")) {
+ break :blk errmsg.Color.Auto;
+ } else if (mem.eql(u8, color_flag, "on")) {
+ break :blk errmsg.Color.On;
+ } else if (mem.eql(u8, color_flag, "off")) {
+ break :blk errmsg.Color.Off;
+ } else unreachable;
} else {
- unreachable;
+ break :blk errmsg.Color.Auto;
}
- }
+ };
- var emit_type = Module.Emit.Binary;
- if (flags.single("emit")) |emit_flag| {
- if (mem.eql(u8, emit_flag, "asm")) {
- emit_type = Module.Emit.Assembly;
- } else if (mem.eql(u8, emit_flag, "bin")) {
- emit_type = Module.Emit.Binary;
- } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
- emit_type = Module.Emit.LlvmIr;
+ const emit_type = blk: {
+ if (flags.single("emit")) |emit_flag| {
+ if (mem.eql(u8, emit_flag, "asm")) {
+ break :blk Compilation.Emit.Assembly;
+ } else if (mem.eql(u8, emit_flag, "bin")) {
+ break :blk Compilation.Emit.Binary;
+ } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
+ break :blk Compilation.Emit.LlvmIr;
+ } else unreachable;
} else {
- unreachable;
+ break :blk Compilation.Emit.Binary;
}
- }
+ };
- var cur_pkg = try Module.CliPkg.init(allocator, "", "", null); // TODO: Need a path, name?
+ var cur_pkg = try CliPkg.init(allocator, "", "", null);
defer cur_pkg.deinit();
var i: usize = 0;
@@ -432,15 +318,16 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
i += 1;
const new_pkg_path = args[i];
- var new_cur_pkg = try Module.CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
+ var new_cur_pkg = try CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
try cur_pkg.children.append(new_cur_pkg);
cur_pkg = new_cur_pkg;
} else if (mem.eql(u8, "--pkg-end", arg_name)) {
- if (cur_pkg.parent == null) {
+ if (cur_pkg.parent) |parent| {
+ cur_pkg = parent;
+ } else {
try stderr.print("encountered --pkg-end with no matching --pkg-begin\n");
os.exit(1);
}
- cur_pkg = ??cur_pkg.parent;
}
}
@@ -449,138 +336,117 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
os.exit(1);
}
- var in_file: ?[]const u8 = undefined;
- switch (flags.positionals.len) {
- 0 => {
- try stderr.write("--name [name] not provided and unable to infer\n");
- os.exit(1);
- },
- 1 => {
- in_file = flags.positionals.at(0);
- },
+ const provided_name = flags.single("name");
+ const root_source_file = switch (flags.positionals.len) {
+ 0 => null,
+ 1 => flags.positionals.at(0),
else => {
- try stderr.write("only one zig input file is accepted during build\n");
+ try stderr.print("unexpected extra parameter: {}\n", flags.positionals.at(1));
os.exit(1);
},
- }
+ };
- const basename = os.path.basename(??in_file);
- var it = mem.split(basename, ".");
- const root_name = it.next() ?? {
- try stderr.write("file name cannot be empty\n");
- os.exit(1);
+ const root_name = if (provided_name) |n| n else blk: {
+ if (root_source_file) |file| {
+ const basename = os.path.basename(file);
+ var it = mem.split(basename, ".");
+ break :blk it.next() orelse basename;
+ } else {
+ try stderr.write("--name [name] not provided and unable to infer\n");
+ os.exit(1);
+ }
};
- const asm_a= flags.many("assembly");
- const obj_a = flags.many("object");
- if (in_file == null and (obj_a == null or (??obj_a).len == 0) and (asm_a == null or (??asm_a).len == 0)) {
+ const is_static = flags.present("static");
+
+ const assembly_files = flags.many("assembly");
+ const link_objects = flags.many("object");
+ if (root_source_file == null and link_objects.len == 0 and assembly_files.len == 0) {
try stderr.write("Expected source file argument or at least one --object or --assembly argument\n");
os.exit(1);
}
- if (out_type == Module.Kind.Obj and (obj_a != null and (??obj_a).len != 0)) {
+ if (out_type == Compilation.Kind.Obj and link_objects.len != 0) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
- const zig_root_source_file = in_file;
-
- const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") ?? "zig-cache"[0..]) catch {
- os.exit(1);
- };
- defer allocator.free(full_cache_dir);
-
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir);
- var module =
- try Module.create(
- allocator,
- root_name,
- zig_root_source_file,
- Target.Native,
- out_type,
- build_mode,
- zig_lib_dir,
- full_cache_dir
- );
- defer module.destroy();
-
- module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") ?? "0", 10);
- module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") ?? "0", 10);
- module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") ?? "0", 10);
-
- module.is_test = false;
-
- if (flags.single("linker-script")) |linker_script| {
- module.linker_script = linker_script;
- }
+ var override_libc: LibCInstallation = undefined;
- module.each_lib_rpath = flags.present("each-lib-rpath");
+ var loop: event.Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
- var clang_argv_buf = ArrayList([]const u8).init(allocator);
- defer clang_argv_buf.deinit();
- if (flags.many("mllvm")) |mllvm_flags| {
- for (mllvm_flags) |mllvm| {
- try clang_argv_buf.append("-mllvm");
- try clang_argv_buf.append(mllvm);
- }
+ var event_loop_local = try EventLoopLocal.init(&loop);
+ defer event_loop_local.deinit();
- module.llvm_argv = mllvm_flags;
- module.clang_argv = clang_argv_buf.toSliceConst();
- }
-
- module.strip = flags.present("strip");
- module.is_static = flags.present("static");
+ var comp = try Compilation.create(
+ &event_loop_local,
+ root_name,
+ root_source_file,
+ Target.Native,
+ out_type,
+ build_mode,
+ is_static,
+ zig_lib_dir,
+ );
+ defer comp.destroy();
- if (flags.single("libc-lib-dir")) |libc_lib_dir| {
- module.libc_lib_dir = libc_lib_dir;
- }
- if (flags.single("libc-static-lib-dir")) |libc_static_lib_dir| {
- module.libc_static_lib_dir = libc_static_lib_dir;
- }
- if (flags.single("libc-include-dir")) |libc_include_dir| {
- module.libc_include_dir = libc_include_dir;
+ if (flags.single("libc")) |libc_path| {
+ parseLibcPaths(loop.allocator, &override_libc, libc_path);
+ comp.override_libc = &override_libc;
}
- if (flags.single("msvc-lib-dir")) |msvc_lib_dir| {
- module.msvc_lib_dir = msvc_lib_dir;
- }
- if (flags.single("kernel32-lib-dir")) |kernel32_lib_dir| {
- module.kernel32_lib_dir = kernel32_lib_dir;
- }
- if (flags.single("dynamic-linker")) |dynamic_linker| {
- module.dynamic_linker = dynamic_linker;
+
+ for (flags.many("library")) |lib| {
+ _ = try comp.addLinkLib(lib, true);
}
- module.verbose_tokenize = flags.present("verbose-tokenize");
- module.verbose_ast_tree = flags.present("verbose-ast-tree");
- module.verbose_ast_fmt = flags.present("verbose-ast-fmt");
- module.verbose_link = flags.present("verbose-link");
- module.verbose_ir = flags.present("verbose-ir");
- module.verbose_llvm_ir = flags.present("verbose-llvm-ir");
- module.verbose_cimport = flags.present("verbose-cimport");
+ comp.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
+ comp.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
+ comp.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
- module.err_color = color;
+ comp.is_test = false;
- if (flags.many("library-path")) |lib_dirs| {
- module.lib_dirs = lib_dirs;
- }
+ comp.linker_script = flags.single("linker-script");
+ comp.each_lib_rpath = flags.present("each-lib-rpath");
- if (flags.many("framework")) |frameworks| {
- module.darwin_frameworks = frameworks;
- }
+ var clang_argv_buf = ArrayList([]const u8).init(allocator);
+ defer clang_argv_buf.deinit();
- if (flags.many("rpath")) |rpath_list| {
- module.rpath_list = rpath_list;
+ const mllvm_flags = flags.many("mllvm");
+ for (mllvm_flags) |mllvm| {
+ try clang_argv_buf.append("-mllvm");
+ try clang_argv_buf.append(mllvm);
}
+ comp.llvm_argv = mllvm_flags;
+ comp.clang_argv = clang_argv_buf.toSliceConst();
+
+ comp.strip = flags.present("strip");
+
+ comp.verbose_tokenize = flags.present("verbose-tokenize");
+ comp.verbose_ast_tree = flags.present("verbose-ast-tree");
+ comp.verbose_ast_fmt = flags.present("verbose-ast-fmt");
+ comp.verbose_link = flags.present("verbose-link");
+ comp.verbose_ir = flags.present("verbose-ir");
+ comp.verbose_llvm_ir = flags.present("verbose-llvm-ir");
+ comp.verbose_cimport = flags.present("verbose-cimport");
+
+ comp.err_color = color;
+ comp.lib_dirs = flags.many("library-path");
+ comp.darwin_frameworks = flags.many("framework");
+ comp.rpath_list = flags.many("rpath");
+
if (flags.single("output-h")) |output_h| {
- module.out_h_path = output_h;
+ comp.out_h_path = output_h;
}
- module.windows_subsystem_windows = flags.present("mwindows");
- module.windows_subsystem_console = flags.present("mconsole");
- module.linker_rdynamic = flags.present("rdynamic");
+ comp.windows_subsystem_windows = flags.present("mwindows");
+ comp.windows_subsystem_console = flags.present("mconsole");
+ comp.linker_rdynamic = flags.present("rdynamic");
if (flags.single("mmacosx-version-min") != null and flags.single("mios-version-min") != null) {
try stderr.write("-mmacosx-version-min and -mios-version-min options not allowed together\n");
@@ -588,48 +454,56 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
}
if (flags.single("mmacosx-version-min")) |ver| {
- module.darwin_version_min = Module.DarwinVersionMin { .MacOS = ver };
+ comp.darwin_version_min = Compilation.DarwinVersionMin{ .MacOS = ver };
}
if (flags.single("mios-version-min")) |ver| {
- module.darwin_version_min = Module.DarwinVersionMin { .Ios = ver };
+ comp.darwin_version_min = Compilation.DarwinVersionMin{ .Ios = ver };
}
- module.emit_file_type = emit_type;
- if (flags.many("object")) |objects| {
- module.link_objects = objects;
- }
- if (flags.many("assembly")) |assembly_files| {
- module.assembly_files = assembly_files;
- }
+ comp.emit_file_type = emit_type;
+ comp.assembly_files = assembly_files;
+ comp.link_out_file = flags.single("output");
+ comp.link_objects = link_objects;
- try module.build();
- try module.link(flags.single("out-file") ?? null);
+ try comp.build();
+ const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color);
+ defer cancel process_build_events_handle;
+ loop.run();
+}
- if (flags.present("print-timing-info")) {
- // codegen_print_timing_info(g, stderr);
- }
+async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_event = await (async comp.events.get() catch unreachable);
- try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
+ switch (build_event) {
+ Compilation.Event.Ok => {
+ return;
+ },
+ Compilation.Event.Error => |err| {
+ std.debug.warn("build failed: {}\n", @errorName(err));
+ os.exit(1);
+ },
+ Compilation.Event.Fail => |msgs| {
+ for (msgs) |msg| {
+ defer msg.destroy();
+ msg.printToFile(&stderr_file, color) catch os.exit(1);
+ }
+ },
+ }
}
-fn cmdBuildExe(allocator: &Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Exe);
+fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
+ return buildOutputType(allocator, args, Compilation.Kind.Exe);
}
-// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
-
-fn cmdBuildLib(allocator: &Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Lib);
+fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
+ return buildOutputType(allocator, args, Compilation.Kind.Lib);
}
-// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
-
-fn cmdBuildObj(allocator: &Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Obj);
+fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
+ return buildOutputType(allocator, args, Compilation.Kind.Obj);
}
-// cmd:fmt /////////////////////////////////////////////////////////////////////////////////////////
-
const usage_fmt =
\\usage: zig fmt [file]...
\\
@@ -637,82 +511,233 @@ const usage_fmt =
\\
\\Options:
\\ --help Print this help and exit
- \\ --keep-backups Retain backup entries for every file
+ \\ --color [auto|off|on] Enable or disable colored error messages
+ \\ --stdin Format code from stdin
\\
\\
- ;
+;
-const args_fmt_spec = []Flag {
+const args_fmt_spec = []Flag{
Flag.Bool("--help"),
- Flag.Bool("--keep-backups"),
+ Flag.Option("--color", []const []const u8{
+ "auto",
+ "off",
+ "on",
+ }),
+ Flag.Bool("--stdin"),
+};
+
+const Fmt = struct {
+ seen: std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8),
+ queue: std.LinkedList([]const u8),
+ any_error: bool,
+
+ // file_path must outlive Fmt
+ fn addToQueue(self: *Fmt, file_path: []const u8) !void {
+ const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
+ .prev = undefined,
+ .next = undefined,
+ .data = file_path,
+ });
+
+ if (try self.seen.put(file_path, {})) |_| return;
+
+ self.queue.append(new_node);
+ }
+
+ fn addDirToQueue(self: *Fmt, file_path: []const u8) !void {
+ var dir = try std.os.Dir.open(self.seen.allocator, file_path);
+ defer dir.close();
+ while (try dir.next()) |entry| {
+ if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
+ const full_path = try os.path.join(self.seen.allocator, file_path, entry.name);
+ try self.addToQueue(full_path);
+ }
+ }
+ }
};
-fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
+fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void {
+ libc.parse(allocator, libc_paths_file, stderr) catch |err| {
+ stderr.print(
+ "Unable to parse libc path file '{}': {}.\n" ++
+ "Try running `zig libc` to see an example for the native target.\n",
+ libc_paths_file,
+ @errorName(err),
+ ) catch os.exit(1);
+ os.exit(1);
+ };
+}
+
+fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
+ switch (args.len) {
+ 0 => {},
+ 1 => {
+ var libc_installation: LibCInstallation = undefined;
+ parseLibcPaths(allocator, &libc_installation, args[0]);
+ return;
+ },
+ else => {
+ try stderr.print("unexpected extra parameter: {}\n", args[1]);
+ os.exit(1);
+ },
+ }
+
+ var loop: event.Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var event_loop_local = try EventLoopLocal.init(&loop);
+ defer event_loop_local.deinit();
+
+ const handle = try async<loop.allocator> findLibCAsync(&event_loop_local);
+ defer cancel handle;
+
+ loop.run();
+}
+
+async fn findLibCAsync(event_loop_local: *EventLoopLocal) void {
+ const libc = (await (async event_loop_local.getNativeLibC() catch unreachable)) catch |err| {
+ stderr.print("unable to find libc: {}\n", @errorName(err)) catch os.exit(1);
+ os.exit(1);
+ };
+ libc.render(stdout) catch os.exit(1);
+}
+
+fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_fmt_spec, args);
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_fmt);
+ try stdout.write(usage_fmt);
os.exit(0);
}
+ const color = blk: {
+ if (flags.single("color")) |color_flag| {
+ if (mem.eql(u8, color_flag, "auto")) {
+ break :blk errmsg.Color.Auto;
+ } else if (mem.eql(u8, color_flag, "on")) {
+ break :blk errmsg.Color.On;
+ } else if (mem.eql(u8, color_flag, "off")) {
+ break :blk errmsg.Color.Off;
+ } else unreachable;
+ } else {
+ break :blk errmsg.Color.Auto;
+ }
+ };
+
+ if (flags.present("stdin")) {
+ if (flags.positionals.len != 0) {
+ try stderr.write("cannot use --stdin with positional arguments\n");
+ os.exit(1);
+ }
+
+ var stdin_file = try io.getStdIn();
+ var stdin = io.FileInStream.init(&stdin_file);
+
+ const source_code = try stdin.stream.readAllAlloc(allocator, @maxValue(usize));
+ defer allocator.free(source_code);
+
+ var tree = std.zig.parse(allocator, source_code) catch |err| {
+ try stderr.print("error parsing stdin: {}\n", err);
+ os.exit(1);
+ };
+ defer tree.deinit();
+
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try errmsg.Msg.createFromParseError(allocator, parse_error, &tree, "<stdin>");
+ defer msg.destroy();
+
+ try msg.printToFile(&stderr_file, color);
+ }
+ if (tree.errors.len != 0) {
+ os.exit(1);
+ }
+
+ _ = try std.zig.render(allocator, stdout, &tree);
+ return;
+ }
+
if (flags.positionals.len == 0) {
try stderr.write("expected at least one source file argument\n");
os.exit(1);
}
+ var fmt = Fmt{
+ .seen = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator),
+ .queue = std.LinkedList([]const u8).init(),
+ .any_error = false,
+ };
+
for (flags.positionals.toSliceConst()) |file_path| {
+ try fmt.addToQueue(file_path);
+ }
+
+ while (fmt.queue.popFirst()) |node| {
+ const file_path = node.data;
+
var file = try os.File.openRead(allocator, file_path);
defer file.close();
- const source_code = io.readFileAlloc(allocator, file_path) catch |err| {
- try stderr.print("unable to open '{}': {}", file_path, err);
- continue;
+ const source_code = io.readFileAlloc(allocator, file_path) catch |err| switch (err) {
+ error.IsDir => {
+ try fmt.addDirToQueue(file_path);
+ continue;
+ },
+ else => {
+ try stderr.print("unable to open '{}': {}\n", file_path, err);
+ fmt.any_error = true;
+ continue;
+ },
};
defer allocator.free(source_code);
- var tokenizer = std.zig.Tokenizer.init(source_code);
- var parser = std.zig.Parser.init(&tokenizer, allocator, file_path);
- defer parser.deinit();
-
- var tree = parser.parse() catch |err| {
+ var tree = std.zig.parse(allocator, source_code) catch |err| {
try stderr.print("error parsing file '{}': {}\n", file_path, err);
+ fmt.any_error = true;
continue;
};
defer tree.deinit();
- var original_file_backup = try Buffer.init(allocator, file_path);
- defer original_file_backup.deinit();
- try original_file_backup.append(".backup");
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try errmsg.Msg.createFromParseError(allocator, parse_error, &tree, file_path);
+ defer msg.destroy();
- try os.rename(allocator, file_path, original_file_backup.toSliceConst());
-
- try stderr.print("{}\n", file_path);
-
- // TODO: BufferedAtomicFile has some access problems.
- var out_file = try os.File.openWrite(allocator, file_path);
- defer out_file.close();
+ try msg.printToFile(&stderr_file, color);
+ }
+ if (tree.errors.len != 0) {
+ fmt.any_error = true;
+ continue;
+ }
- var out_file_stream = io.FileOutStream.init(&out_file);
- try parser.renderSource(out_file_stream.stream, tree.root_node);
+ const baf = try io.BufferedAtomicFile.create(allocator, file_path);
+ defer baf.destroy();
- if (!flags.present("keep-backups")) {
- try os.deleteFile(allocator, original_file_backup.toSliceConst());
+ const anything_changed = try std.zig.render(allocator, baf.stream(), &tree);
+ if (anything_changed) {
+ try stderr.print("{}\n", file_path);
+ try baf.finish();
}
}
+
+ if (fmt.any_error) {
+ os.exit(1);
+ }
}
// cmd:targets /////////////////////////////////////////////////////////////////////////////////////
-fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write("Architectures:\n");
{
comptime var i: usize = 0;
inline while (i < @memberCount(builtin.Arch)) : (i += 1) {
comptime const arch_tag = @memberName(builtin.Arch, i);
// NOTE: Cannot use empty string, see #918.
- comptime const native_str =
- if (comptime mem.eql(u8, arch_tag, @tagName(builtin.arch))) " (native)\n" else "\n";
+ comptime const native_str = if (comptime mem.eql(u8, arch_tag, @tagName(builtin.arch))) " (native)\n" else "\n";
try stdout.print(" {}{}", arch_tag, native_str);
}
@@ -725,8 +750,7 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
inline while (i < @memberCount(builtin.Os)) : (i += 1) {
comptime const os_tag = @memberName(builtin.Os, i);
// NOTE: Cannot use empty string, see #918.
- comptime const native_str =
- if (comptime mem.eql(u8, os_tag, @tagName(builtin.os))) " (native)\n" else "\n";
+ comptime const native_str = if (comptime mem.eql(u8, os_tag, @tagName(builtin.os))) " (native)\n" else "\n";
try stdout.print(" {}{}", os_tag, native_str);
}
@@ -739,176 +763,23 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
inline while (i < @memberCount(builtin.Environ)) : (i += 1) {
comptime const environ_tag = @memberName(builtin.Environ, i);
// NOTE: Cannot use empty string, see #918.
- comptime const native_str =
- if (comptime mem.eql(u8, environ_tag, @tagName(builtin.environ))) " (native)\n" else "\n";
+ comptime const native_str = if (comptime mem.eql(u8, environ_tag, @tagName(builtin.environ))) " (native)\n" else "\n";
try stdout.print(" {}{}", environ_tag, native_str);
}
}
}
-// cmd:version /////////////////////////////////////////////////////////////////////////////////////
-
-fn cmdVersion(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
-// cmd:test ////////////////////////////////////////////////////////////////////////////////////////
-
-const usage_test =
- \\usage: zig test [file]...
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
- ;
-
-const args_test_spec = []Flag {
- Flag.Bool("--help"),
-};
-
-
-fn cmdTest(allocator: &Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_test);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- // compile the test program into the cache and run
-
- // NOTE: May be overlap with buildOutput, take the shared part out.
- try stderr.print("testing file {}\n", flags.positionals.at(0));
-}
-
-// cmd:run /////////////////////////////////////////////////////////////////////////////////////////
-
-// Run should be simple and not expose the full set of arguments provided by build-exe. If specific
-// build requirements are need, the user should `build-exe` then `run` manually.
-const usage_run =
- \\usage: zig run [file] -- <runtime args>
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
- ;
-
-const args_run_spec = []Flag {
- Flag.Bool("--help"),
-};
-
-
-fn cmdRun(allocator: &Allocator, args: []const []const u8) !void {
- var compile_args = args;
- var runtime_args: []const []const u8 = []const []const u8 {};
-
- for (args) |argv, i| {
- if (mem.eql(u8, argv, "--")) {
- compile_args = args[0..i];
- runtime_args = args[i+1..];
- break;
- }
- }
- var flags = try Args.parse(allocator, args_run_spec, compile_args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_run);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- try stderr.print("runtime args:\n");
- for (runtime_args) |cargs| {
- try stderr.print("{}\n", cargs);
- }
-}
-
-// cmd:translate-c /////////////////////////////////////////////////////////////////////////////////
-
-const usage_translate_c =
- \\usage: zig translate-c [file]
- \\
- \\Options:
- \\ --help Print this help and exit
- \\ --enable-timing-info Print timing diagnostics
- \\ --output [path] Output file to write generated zig file (default: stdout)
- \\
- \\
- ;
-
-const args_translate_c_spec = []Flag {
- Flag.Bool("--help"),
- Flag.Bool("--enable-timing-info"),
- Flag.Arg1("--libc-include-dir"),
- Flag.Arg1("--output"),
-};
-
-fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_translate_c_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_translate_c);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one c source file\n");
- os.exit(1);
- }
-
- // set up codegen
-
- const zig_root_source_file = null;
-
- // NOTE: translate-c shouldn't require setting up the full codegen instance as it does in
- // the C++ compiler.
-
- // codegen_create(g);
- // codegen_set_out_name(g, null);
- // codegen_translate_c(g, flags.positional.at(0))
-
- var output_stream = stdout;
- if (flags.single("output")) |output_file| {
- var file = try os.File.openWrite(allocator, output_file);
- defer file.close();
-
- var file_stream = io.FileOutStream.init(&file);
- // TODO: Not being set correctly, still stdout
- output_stream = &file_stream.stream;
- }
-
- // ast_render(g, output_stream, g->root_import->root, 4);
- try output_stream.write("pub const example = 10;\n");
-
- if (flags.present("enable-timing-info")) {
- // codegen_print_timing_info(g, stdout);
- try stderr.write("printing timing info for translate-c\n");
- }
-}
-
-// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
+const args_test_spec = []Flag{Flag.Bool("--help")};
-fn cmdHelp(allocator: &Allocator, args: []const []const u8) !void {
- try stderr.write(usage);
+fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
+ try stdout.write(usage);
}
-// cmd:zen /////////////////////////////////////////////////////////////////////////////////////////
-
const info_zen =
\\
\\ * Communicate intent precisely.
@@ -924,14 +795,12 @@ const info_zen =
\\ * Together we serve end users.
\\
\\
- ;
+;
-fn cmdZen(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
-// cmd:internal ////////////////////////////////////////////////////////////////////////////////////
-
const usage_internal =
\\usage: zig internal [subcommand]
\\
@@ -939,17 +808,18 @@ const usage_internal =
\\ build-info Print static compiler build-info
\\
\\
- ;
+;
-fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try stderr.write(usage_internal);
os.exit(1);
}
- const sub_commands = []Command {
- Command { .name = "build-info", .exec = cmdInternalBuildInfo },
- };
+ const sub_commands = []Command{Command{
+ .name = "build-info",
+ .exec = cmdInternalBuildInfo,
+ }};
for (sub_commands) |sub_command| {
if (mem.eql(u8, sub_command.name, args[0])) {
@@ -962,7 +832,7 @@ fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
try stderr.write(usage_internal);
}
-fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print(
\\ZIG_CMAKE_BINARY_DIR {}
\\ZIG_CXX_COMPILER {}
@@ -973,7 +843,7 @@ fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
\\ZIG_C_HEADER_FILES {}
\\ZIG_DIA_GUIDS_LIB {}
\\
- ,
+ ,
std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR),
std.cstr.toSliceConst(c.ZIG_CXX_COMPILER),
std.cstr.toSliceConst(c.ZIG_LLVM_CONFIG_EXE),
@@ -984,3 +854,27 @@ fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
);
}
+
+const CliPkg = struct {
+ name: []const u8,
+ path: []const u8,
+ children: ArrayList(*CliPkg),
+ parent: ?*CliPkg,
+
+ pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
+ var pkg = try allocator.create(CliPkg{
+ .name = name,
+ .path = path,
+ .children = ArrayList(*CliPkg).init(allocator),
+ .parent = parent,
+ });
+ return pkg;
+ }
+
+ pub fn deinit(self: *CliPkg) void {
+ for (self.children.toSliceConst()) |child| {
+ child.deinit();
+ }
+ self.children.deinit();
+ }
+};
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
deleted file mode 100644
index eec30749e2..0000000000
--- a/src-self-hosted/module.zig
+++ /dev/null
@@ -1,326 +0,0 @@
-const std = @import("std");
-const os = std.os;
-const io = std.io;
-const mem = std.mem;
-const Buffer = std.Buffer;
-const llvm = @import("llvm.zig");
-const c = @import("c.zig");
-const builtin = @import("builtin");
-const Target = @import("target.zig").Target;
-const warn = std.debug.warn;
-const Tokenizer = std.zig.Tokenizer;
-const Token = std.zig.Token;
-const Parser = std.zig.Parser;
-const ArrayList = std.ArrayList;
-
-pub const Module = struct {
- allocator: &mem.Allocator,
- name: Buffer,
- root_src_path: ?[]const u8,
- module: llvm.ModuleRef,
- context: llvm.ContextRef,
- builder: llvm.BuilderRef,
- target: Target,
- build_mode: builtin.Mode,
- zig_lib_dir: []const u8,
-
- version_major: u32,
- version_minor: u32,
- version_patch: u32,
-
- linker_script: ?[]const u8,
- cache_dir: []const u8,
- libc_lib_dir: ?[]const u8,
- libc_static_lib_dir: ?[]const u8,
- libc_include_dir: ?[]const u8,
- msvc_lib_dir: ?[]const u8,
- kernel32_lib_dir: ?[]const u8,
- dynamic_linker: ?[]const u8,
- out_h_path: ?[]const u8,
-
- is_test: bool,
- each_lib_rpath: bool,
- strip: bool,
- is_static: bool,
- linker_rdynamic: bool,
-
- clang_argv: []const []const u8,
- llvm_argv: []const []const u8,
- lib_dirs: []const []const u8,
- rpath_list: []const []const u8,
- assembly_files: []const []const u8,
- link_objects: []const []const u8,
-
- windows_subsystem_windows: bool,
- windows_subsystem_console: bool,
-
- link_libs_list: ArrayList(&LinkLib),
- libc_link_lib: ?&LinkLib,
-
- err_color: ErrColor,
-
- verbose_tokenize: bool,
- verbose_ast_tree: bool,
- verbose_ast_fmt: bool,
- verbose_cimport: bool,
- verbose_ir: bool,
- verbose_llvm_ir: bool,
- verbose_link: bool,
-
- darwin_frameworks: []const []const u8,
- darwin_version_min: DarwinVersionMin,
-
- test_filters: []const []const u8,
- test_name_prefix: ?[]const u8,
-
- emit_file_type: Emit,
-
- kind: Kind,
-
- pub const DarwinVersionMin = union(enum) {
- None,
- MacOS: []const u8,
- Ios: []const u8,
- };
-
- pub const Kind = enum {
- Exe,
- Lib,
- Obj,
- };
-
- pub const ErrColor = enum {
- Auto,
- Off,
- On,
- };
-
- pub const LinkLib = struct {
- name: []const u8,
- path: ?[]const u8,
- /// the list of symbols we depend on from this lib
- symbols: ArrayList([]u8),
- provided_explicitly: bool,
- };
-
- pub const Emit = enum {
- Binary,
- Assembly,
- LlvmIr,
- };
-
- pub const CliPkg = struct {
- name: []const u8,
- path: []const u8,
- children: ArrayList(&CliPkg),
- parent: ?&CliPkg,
-
- pub fn init(allocator: &mem.Allocator, name: []const u8, path: []const u8, parent: ?&CliPkg) !&CliPkg {
- var pkg = try allocator.create(CliPkg);
- pkg.name = name;
- pkg.path = path;
- pkg.children = ArrayList(&CliPkg).init(allocator);
- pkg.parent = parent;
- return pkg;
- }
-
- pub fn deinit(self: &CliPkg) void {
- for (self.children.toSliceConst()) |child| {
- child.deinit();
- }
- self.children.deinit();
- }
- };
-
- pub fn create(allocator: &mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: &const Target,
- kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !&Module
- {
- var name_buffer = try Buffer.init(allocator, name);
- errdefer name_buffer.deinit();
-
- const context = c.LLVMContextCreate() ?? return error.OutOfMemory;
- errdefer c.LLVMContextDispose(context);
-
- const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) ?? return error.OutOfMemory;
- errdefer c.LLVMDisposeModule(module);
-
- const builder = c.LLVMCreateBuilderInContext(context) ?? return error.OutOfMemory;
- errdefer c.LLVMDisposeBuilder(builder);
-
- const module_ptr = try allocator.create(Module);
- errdefer allocator.destroy(module_ptr);
-
- *module_ptr = Module {
- .allocator = allocator,
- .name = name_buffer,
- .root_src_path = root_src_path,
- .module = module,
- .context = context,
- .builder = builder,
- .target = *target,
- .kind = kind,
- .build_mode = build_mode,
- .zig_lib_dir = zig_lib_dir,
- .cache_dir = cache_dir,
-
- .version_major = 0,
- .version_minor = 0,
- .version_patch = 0,
-
- .verbose_tokenize = false,
- .verbose_ast_tree = false,
- .verbose_ast_fmt = false,
- .verbose_cimport = false,
- .verbose_ir = false,
- .verbose_llvm_ir = false,
- .verbose_link = false,
-
- .linker_script = null,
- .libc_lib_dir = null,
- .libc_static_lib_dir = null,
- .libc_include_dir = null,
- .msvc_lib_dir = null,
- .kernel32_lib_dir = null,
- .dynamic_linker = null,
- .out_h_path = null,
- .is_test = false,
- .each_lib_rpath = false,
- .strip = false,
- .is_static = false,
- .linker_rdynamic = false,
- .clang_argv = [][]const u8{},
- .llvm_argv = [][]const u8{},
- .lib_dirs = [][]const u8{},
- .rpath_list = [][]const u8{},
- .assembly_files = [][]const u8{},
- .link_objects = [][]const u8{},
- .windows_subsystem_windows = false,
- .windows_subsystem_console = false,
- .link_libs_list = ArrayList(&LinkLib).init(allocator),
- .libc_link_lib = null,
- .err_color = ErrColor.Auto,
- .darwin_frameworks = [][]const u8{},
- .darwin_version_min = DarwinVersionMin.None,
- .test_filters = [][]const u8{},
- .test_name_prefix = null,
- .emit_file_type = Emit.Binary,
- };
- return module_ptr;
- }
-
- fn dump(self: &Module) void {
- c.LLVMDumpModule(self.module);
- }
-
- pub fn destroy(self: &Module) void {
- c.LLVMDisposeBuilder(self.builder);
- c.LLVMDisposeModule(self.module);
- c.LLVMContextDispose(self.context);
- self.name.deinit();
-
- self.allocator.destroy(self);
- }
-
- pub fn build(self: &Module) !void {
- if (self.llvm_argv.len != 0) {
- var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator,
- [][]const []const u8 { [][]const u8{"zig (LLVM option parsing)"}, self.llvm_argv, });
- defer c_compatible_args.deinit();
- c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
- }
-
- const root_src_path = self.root_src_path ?? @panic("TODO handle null root src path");
- const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
- try printError("unable to get real path '{}': {}", root_src_path, err);
- return err;
- };
- errdefer self.allocator.free(root_src_real_path);
-
- const source_code = io.readFileAlloc(self.allocator, root_src_real_path) catch |err| {
- try printError("unable to open '{}': {}", root_src_real_path, err);
- return err;
- };
- errdefer self.allocator.free(source_code);
-
- warn("====input:====\n");
-
- warn("{}", source_code);
-
- warn("====tokenization:====\n");
- {
- var tokenizer = Tokenizer.init(source_code);
- while (true) {
- const token = tokenizer.next();
- tokenizer.dump(token);
- if (token.id == Token.Id.Eof) {
- break;
- }
- }
- }
-
- warn("====parse:====\n");
-
- var tokenizer = Tokenizer.init(source_code);
- var parser = Parser.init(&tokenizer, self.allocator, root_src_real_path);
- defer parser.deinit();
-
- var tree = try parser.parse();
- defer tree.deinit();
-
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
- try parser.renderAst(out_stream, tree.root_node);
-
- warn("====fmt:====\n");
- try parser.renderSource(out_stream, tree.root_node);
-
- warn("====ir:====\n");
- warn("TODO\n\n");
-
- warn("====llvm ir:====\n");
- self.dump();
-
- }
-
- pub fn link(self: &Module, out_file: ?[]const u8) !void {
- warn("TODO link");
- return error.Todo;
- }
-
- pub fn addLinkLib(self: &Module, name: []const u8, provided_explicitly: bool) !&LinkLib {
- const is_libc = mem.eql(u8, name, "c");
-
- if (is_libc) {
- if (self.libc_link_lib) |libc_link_lib| {
- return libc_link_lib;
- }
- }
-
- for (self.link_libs_list.toSliceConst()) |existing_lib| {
- if (mem.eql(u8, name, existing_lib.name)) {
- return existing_lib;
- }
- }
-
- const link_lib = try self.allocator.create(LinkLib);
- *link_lib = LinkLib {
- .name = name,
- .path = null,
- .provided_explicitly = provided_explicitly,
- .symbols = ArrayList([]u8).init(self.allocator),
- };
- try self.link_libs_list.append(link_lib);
- if (is_libc) {
- self.libc_link_lib = link_lib;
- }
- return link_lib;
- }
-};
-
-fn printError(comptime format: []const u8, args: ...) !void {
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
- try out_stream.print(format, args);
-}
diff --git a/src-self-hosted/package.zig b/src-self-hosted/package.zig
new file mode 100644
index 0000000000..720b279651
--- /dev/null
+++ b/src-self-hosted/package.zig
@@ -0,0 +1,29 @@
+const std = @import("std");
+const mem = std.mem;
+const assert = std.debug.assert;
+const Buffer = std.Buffer;
+
+pub const Package = struct {
+ root_src_dir: Buffer,
+ root_src_path: Buffer,
+
+ /// root_src_path above is relative to root_src_dir
+ table: Table,
+
+ pub const Table = std.HashMap([]const u8, *Package, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ /// makes internal copies of root_src_dir and root_src_path
+ /// allocator should be an arena allocator because Package never frees anything
+ pub fn create(allocator: *mem.Allocator, root_src_dir: []const u8, root_src_path: []const u8) !*Package {
+ return allocator.create(Package{
+ .root_src_dir = try Buffer.init(allocator, root_src_dir),
+ .root_src_path = try Buffer.init(allocator, root_src_path),
+ .table = Table.init(allocator),
+ });
+ }
+
+ pub fn add(self: *Package, name: []const u8, package: *Package) !void {
+ const entry = try self.table.put(try mem.dupe(self.table.allocator, u8, name), package);
+ assert(entry == null);
+ }
+};
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index 05e586daae..a38e765c6e 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -1,16 +1,396 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Allocator = mem.Allocator;
+const Decl = @import("decl.zig").Decl;
+const Compilation = @import("compilation.zig").Compilation;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
+const ir = @import("ir.zig");
+const Span = @import("errmsg.zig").Span;
+const assert = std.debug.assert;
+const event = std.event;
+const llvm = @import("llvm.zig");
+
pub const Scope = struct {
id: Id,
- parent: &Scope,
+ parent: ?*Scope,
+ ref_count: std.atomic.Int(usize),
+
+ /// Thread-safe
+ pub fn ref(base: *Scope) void {
+ _ = base.ref_count.incr();
+ }
+
+ /// Thread-safe
+ pub fn deref(base: *Scope, comp: *Compilation) void {
+ if (base.ref_count.decr() == 1) {
+ if (base.parent) |parent| parent.deref(comp);
+ switch (base.id) {
+ Id.Root => @fieldParentPtr(Root, "base", base).destroy(comp),
+ Id.Decls => @fieldParentPtr(Decls, "base", base).destroy(comp),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
+ Id.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(comp),
+ Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(comp),
+ Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
+ Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
+ Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp),
+ }
+ }
+ }
+
+ pub fn findRoot(base: *Scope) *Root {
+ var scope = base;
+ while (scope.parent) |parent| {
+ scope = parent;
+ }
+ assert(scope.id == Id.Root);
+ return @fieldParentPtr(Root, "base", scope);
+ }
+
+ pub fn findFnDef(base: *Scope) ?*FnDef {
+ var scope = base;
+ while (true) {
+ switch (scope.id) {
+ Id.FnDef => return @fieldParentPtr(FnDef, "base", scope),
+ Id.Root, Id.Decls => return null,
+
+ Id.Block,
+ Id.Defer,
+ Id.DeferExpr,
+ Id.CompTime,
+ Id.Var,
+ => scope = scope.parent.?,
+ }
+ }
+ }
+
+ pub fn findDeferExpr(base: *Scope) ?*DeferExpr {
+ var scope = base;
+ while (true) {
+ switch (scope.id) {
+ Id.DeferExpr => return @fieldParentPtr(DeferExpr, "base", scope),
+
+ Id.FnDef,
+ Id.Decls,
+ => return null,
+
+ Id.Block,
+ Id.Defer,
+ Id.CompTime,
+ Id.Root,
+ Id.Var,
+ => scope = scope.parent orelse return null,
+ }
+ }
+ }
+
+ fn init(base: *Scope, id: Id, parent: *Scope) void {
+ base.* = Scope{
+ .id = id,
+ .parent = parent,
+ .ref_count = std.atomic.Int(usize).init(1),
+ };
+ parent.ref();
+ }
pub const Id = enum {
+ Root,
Decls,
Block,
- Defer,
- DeferExpr,
- VarDecl,
- CImport,
- Loop,
FnDef,
CompTime,
+ Defer,
+ DeferExpr,
+ Var,
+ };
+
+ pub const Root = struct {
+ base: Scope,
+ tree: *ast.Tree,
+ realpath: []const u8,
+
+ /// Creates a Root scope with 1 reference
+ /// Takes ownership of realpath
+ /// Takes ownership of tree, will deinit and destroy when done.
+ pub fn create(comp: *Compilation, tree: *ast.Tree, realpath: []u8) !*Root {
+ const self = try comp.gpa().createOne(Root);
+ self.* = Root{
+ .base = Scope{
+ .id = Id.Root,
+ .parent = null,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .tree = tree,
+ .realpath = realpath,
+ };
+
+ return self;
+ }
+
+ pub fn destroy(self: *Root, comp: *Compilation) void {
+ comp.gpa().free(self.tree.source);
+ self.tree.deinit();
+ comp.gpa().destroy(self.tree);
+ comp.gpa().free(self.realpath);
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Decls = struct {
+ base: Scope,
+
+ /// The lock must be respected for writing. However once name_future resolves,
+ /// readers can freely access it.
+ table: event.Locked(Decl.Table),
+
+ /// Once this future is resolved, the table is complete and available for unlocked
+ /// read-only access. It does not mean all the decls are resolved; it means only that
+ /// the table has all the names. Each decl in the table has its own resolution state.
+ name_future: event.Future(void),
+
+ /// Creates a Decls scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope) !*Decls {
+ const self = try comp.gpa().createOne(Decls);
+ self.* = Decls{
+ .base = undefined,
+ .table = event.Locked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
+ .name_future = event.Future(void).init(comp.loop),
+ };
+ self.base.init(Id.Decls, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *Decls, comp: *Compilation) void {
+ self.table.deinit();
+ comp.gpa().destroy(self);
+ }
+
+ pub async fn getTableReadOnly(self: *Decls) *Decl.Table {
+ _ = await (async self.name_future.get() catch unreachable);
+ return &self.table.private_data;
+ }
+ };
+
+ pub const Block = struct {
+ base: Scope,
+ incoming_values: std.ArrayList(*ir.Inst),
+ incoming_blocks: std.ArrayList(*ir.BasicBlock),
+ end_block: *ir.BasicBlock,
+ is_comptime: *ir.Inst,
+
+ safety: Safety,
+
+ const Safety = union(enum) {
+ Auto,
+ Manual: Manual,
+
+ const Manual = struct {
+ /// the source span that disabled the safety value
+ span: Span,
+
+ /// whether safety is enabled
+ enabled: bool,
+ };
+
+ fn get(self: Safety, comp: *Compilation) bool {
+ return switch (self) {
+ Safety.Auto => switch (comp.build_mode) {
+ builtin.Mode.Debug,
+ builtin.Mode.ReleaseSafe,
+ => true,
+ builtin.Mode.ReleaseFast,
+ builtin.Mode.ReleaseSmall,
+ => false,
+ },
+ @TagType(Safety).Manual => |man| man.enabled,
+ };
+ }
+ };
+
+ /// Creates a Block scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope) !*Block {
+ const self = try comp.gpa().createOne(Block);
+ self.* = Block{
+ .base = undefined,
+ .incoming_values = undefined,
+ .incoming_blocks = undefined,
+ .end_block = undefined,
+ .is_comptime = undefined,
+ .safety = Safety.Auto,
+ };
+ self.base.init(Id.Block, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *Block, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const FnDef = struct {
+ base: Scope,
+
+ /// This reference is not counted so that the scope can get destroyed with the function
+ fn_val: ?*Value.Fn,
+
+ /// Creates a FnDef scope with 1 reference
+ /// Must set the fn_val later
+ pub fn create(comp: *Compilation, parent: *Scope) !*FnDef {
+ const self = try comp.gpa().createOne(FnDef);
+ self.* = FnDef{
+ .base = undefined,
+ .fn_val = null,
+ };
+ self.base.init(Id.FnDef, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *FnDef, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const CompTime = struct {
+ base: Scope,
+
+ /// Creates a CompTime scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope) !*CompTime {
+ const self = try comp.gpa().createOne(CompTime);
+ self.* = CompTime{ .base = undefined };
+ self.base.init(Id.CompTime, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *CompTime, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Defer = struct {
+ base: Scope,
+ defer_expr_scope: *DeferExpr,
+ kind: Kind,
+
+ pub const Kind = enum {
+ ScopeExit,
+ ErrorExit,
+ };
+
+ /// Creates a Defer scope with 1 reference
+ pub fn create(
+ comp: *Compilation,
+ parent: *Scope,
+ kind: Kind,
+ defer_expr_scope: *DeferExpr,
+ ) !*Defer {
+ const self = try comp.gpa().createOne(Defer);
+ self.* = Defer{
+ .base = undefined,
+ .defer_expr_scope = defer_expr_scope,
+ .kind = kind,
+ };
+ self.base.init(Id.Defer, parent);
+ defer_expr_scope.base.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *Defer, comp: *Compilation) void {
+ self.defer_expr_scope.base.deref(comp);
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const DeferExpr = struct {
+ base: Scope,
+ expr_node: *ast.Node,
+ reported_err: bool,
+
+ /// Creates a DeferExpr scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope, expr_node: *ast.Node) !*DeferExpr {
+ const self = try comp.gpa().createOne(DeferExpr);
+ self.* = DeferExpr{
+ .base = undefined,
+ .expr_node = expr_node,
+ .reported_err = false,
+ };
+ self.base.init(Id.DeferExpr, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *DeferExpr, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Var = struct {
+ base: Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ data: Data,
+
+ pub const Data = union(enum) {
+ Param: Param,
+ Const: *Value,
+ };
+
+ pub const Param = struct {
+ index: usize,
+ typ: *Type,
+ llvm_value: llvm.ValueRef,
+ };
+
+ pub fn createParam(
+ comp: *Compilation,
+ parent: *Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ param_index: usize,
+ param_type: *Type,
+ ) !*Var {
+ const self = try create(comp, parent, name, src_node);
+ self.data = Data{
+ .Param = Param{
+ .index = param_index,
+ .typ = param_type,
+ .llvm_value = undefined,
+ },
+ };
+ return self;
+ }
+
+ pub fn createConst(
+ comp: *Compilation,
+ parent: *Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ value: *Value,
+ ) !*Var {
+ const self = try create(comp, parent, name, src_node);
+ self.data = Data{ .Const = value };
+ value.ref();
+ return self;
+ }
+
+ fn create(comp: *Compilation, parent: *Scope, name: []const u8, src_node: *ast.Node) !*Var {
+ const self = try comp.gpa().createOne(Var);
+ self.* = Var{
+ .base = undefined,
+ .name = name,
+ .src_node = src_node,
+ .data = undefined,
+ };
+ self.base.init(Id.Var, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *Var, comp: *Compilation) void {
+ switch (self.data) {
+ Data.Param => {},
+ Data.Const => |value| value.deref(comp),
+ }
+ comp.gpa().destroy(self);
+ }
};
};
diff --git a/src-self-hosted/target.zig b/src-self-hosted/target.zig
index 375b48f10d..0cc8d02a62 100644
--- a/src-self-hosted/target.zig
+++ b/src-self-hosted/target.zig
@@ -1,60 +1,562 @@
+const std = @import("std");
const builtin = @import("builtin");
-const c = @import("c.zig");
+const llvm = @import("llvm.zig");
+const CInt = @import("c_int.zig").CInt;
-pub const CrossTarget = struct {
- arch: builtin.Arch,
- os: builtin.Os,
- environ: builtin.Environ,
+pub const FloatAbi = enum {
+ Hard,
+ Soft,
+ SoftFp,
};
pub const Target = union(enum) {
Native,
- Cross: CrossTarget,
+ Cross: Cross,
- pub fn oFileExt(self: &const Target) []const u8 {
- const environ = switch (*self) {
- Target.Native => builtin.environ,
- Target.Cross => |t| t.environ,
- };
- return switch (environ) {
- builtin.Environ.msvc => ".obj",
+ pub const Cross = struct {
+ arch: builtin.Arch,
+ os: builtin.Os,
+ environ: builtin.Environ,
+ object_format: builtin.ObjectFormat,
+ };
+
+ pub fn objFileExt(self: Target) []const u8 {
+ return switch (self.getObjectFormat()) {
+ builtin.ObjectFormat.coff => ".obj",
else => ".o",
};
}
- pub fn exeFileExt(self: &const Target) []const u8 {
+ pub fn exeFileExt(self: Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
- pub fn getOs(self: &const Target) builtin.Os {
- return switch (*self) {
+ pub fn libFileExt(self: Target, is_static: bool) []const u8 {
+ return switch (self.getOs()) {
+ builtin.Os.windows => if (is_static) ".lib" else ".dll",
+ else => if (is_static) ".a" else ".so",
+ };
+ }
+
+ pub fn getOs(self: Target) builtin.Os {
+ return switch (self) {
Target.Native => builtin.os,
- Target.Cross => |t| t.os,
+ @TagType(Target).Cross => |t| t.os,
+ };
+ }
+
+ pub fn getArch(self: Target) builtin.Arch {
+ return switch (self) {
+ Target.Native => builtin.arch,
+ @TagType(Target).Cross => |t| t.arch,
+ };
+ }
+
+ pub fn getEnviron(self: Target) builtin.Environ {
+ return switch (self) {
+ Target.Native => builtin.environ,
+ @TagType(Target).Cross => |t| t.environ,
+ };
+ }
+
+ pub fn getObjectFormat(self: Target) builtin.ObjectFormat {
+ return switch (self) {
+ Target.Native => builtin.object_format,
+ @TagType(Target).Cross => |t| t.object_format,
+ };
+ }
+
+ pub fn isWasm(self: Target) bool {
+ return switch (self.getArch()) {
+ builtin.Arch.wasm32, builtin.Arch.wasm64 => true,
+ else => false,
};
}
- pub fn isDarwin(self: &const Target) bool {
+ pub fn isDarwin(self: Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
- pub fn isWindows(self: &const Target) bool {
+ pub fn isWindows(self: Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
};
}
-};
-pub fn initializeAll() void {
- c.LLVMInitializeAllTargets();
- c.LLVMInitializeAllTargetInfos();
- c.LLVMInitializeAllTargetMCs();
- c.LLVMInitializeAllAsmPrinters();
- c.LLVMInitializeAllAsmParsers();
-}
+ /// TODO expose the arch and subarch separately
+ pub fn isArmOrThumb(self: Target) bool {
+ return switch (self.getArch()) {
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ builtin.Arch.armebv8_3a,
+ builtin.Arch.armebv8_2a,
+ builtin.Arch.armebv8_1a,
+ builtin.Arch.armebv8,
+ builtin.Arch.armebv8r,
+ builtin.Arch.armebv8m_baseline,
+ builtin.Arch.armebv8m_mainline,
+ builtin.Arch.armebv7,
+ builtin.Arch.armebv7em,
+ builtin.Arch.armebv7m,
+ builtin.Arch.armebv7s,
+ builtin.Arch.armebv7k,
+ builtin.Arch.armebv7ve,
+ builtin.Arch.armebv6,
+ builtin.Arch.armebv6m,
+ builtin.Arch.armebv6k,
+ builtin.Arch.armebv6t2,
+ builtin.Arch.armebv5,
+ builtin.Arch.armebv5te,
+ builtin.Arch.armebv4t,
+ builtin.Arch.thumb,
+ builtin.Arch.thumbeb,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn initializeAll() void {
+ llvm.InitializeAllTargets();
+ llvm.InitializeAllTargetInfos();
+ llvm.InitializeAllTargetMCs();
+ llvm.InitializeAllAsmPrinters();
+ llvm.InitializeAllAsmParsers();
+ }
+
+ pub fn getTriple(self: Target, allocator: *std.mem.Allocator) !std.Buffer {
+ var result = try std.Buffer.initSize(allocator, 0);
+ errdefer result.deinit();
+
+ // LLVM WebAssembly output support requires the target to be activated at
+ // build time with -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly.
+ //
+ // LLVM determines the output format based on the environment suffix,
+ // defaulting to an object based on the architecture. The default format in
+ // LLVM 6 sets the wasm arch output incorrectly to ELF. We need to
+ // explicitly set this ourself in order for it to work.
+ //
+ // This is fixed in LLVM 7 and you will be able to get wasm output by
+ // using the target triple `wasm32-unknown-unknown-unknown`.
+ const env_name = if (self.isWasm()) "wasm" else @tagName(self.getEnviron());
+
+ var out = &std.io.BufferOutStream.init(&result).stream;
+ try out.print("{}-unknown-{}-{}", @tagName(self.getArch()), @tagName(self.getOs()), env_name);
+
+ return result;
+ }
+
+ pub fn is64bit(self: Target) bool {
+ return self.getArchPtrBitWidth() == 64;
+ }
+
+ pub fn getArchPtrBitWidth(self: Target) u32 {
+ switch (self.getArch()) {
+ builtin.Arch.avr,
+ builtin.Arch.msp430,
+ => return 16,
+
+ builtin.Arch.arc,
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ builtin.Arch.armebv8_3a,
+ builtin.Arch.armebv8_2a,
+ builtin.Arch.armebv8_1a,
+ builtin.Arch.armebv8,
+ builtin.Arch.armebv8r,
+ builtin.Arch.armebv8m_baseline,
+ builtin.Arch.armebv8m_mainline,
+ builtin.Arch.armebv7,
+ builtin.Arch.armebv7em,
+ builtin.Arch.armebv7m,
+ builtin.Arch.armebv7s,
+ builtin.Arch.armebv7k,
+ builtin.Arch.armebv7ve,
+ builtin.Arch.armebv6,
+ builtin.Arch.armebv6m,
+ builtin.Arch.armebv6k,
+ builtin.Arch.armebv6t2,
+ builtin.Arch.armebv5,
+ builtin.Arch.armebv5te,
+ builtin.Arch.armebv4t,
+ builtin.Arch.hexagon,
+ builtin.Arch.le32,
+ builtin.Arch.mips,
+ builtin.Arch.mipsel,
+ builtin.Arch.nios2,
+ builtin.Arch.powerpc,
+ builtin.Arch.r600,
+ builtin.Arch.riscv32,
+ builtin.Arch.sparc,
+ builtin.Arch.sparcel,
+ builtin.Arch.tce,
+ builtin.Arch.tcele,
+ builtin.Arch.thumb,
+ builtin.Arch.thumbeb,
+ builtin.Arch.i386,
+ builtin.Arch.xcore,
+ builtin.Arch.nvptx,
+ builtin.Arch.amdil,
+ builtin.Arch.hsail,
+ builtin.Arch.spir,
+ builtin.Arch.kalimbav3,
+ builtin.Arch.kalimbav4,
+ builtin.Arch.kalimbav5,
+ builtin.Arch.shave,
+ builtin.Arch.lanai,
+ builtin.Arch.wasm32,
+ builtin.Arch.renderscript32,
+ => return 32,
+
+ builtin.Arch.aarch64,
+ builtin.Arch.aarch64_be,
+ builtin.Arch.mips64,
+ builtin.Arch.mips64el,
+ builtin.Arch.powerpc64,
+ builtin.Arch.powerpc64le,
+ builtin.Arch.riscv64,
+ builtin.Arch.x86_64,
+ builtin.Arch.nvptx64,
+ builtin.Arch.le64,
+ builtin.Arch.amdil64,
+ builtin.Arch.hsail64,
+ builtin.Arch.spir64,
+ builtin.Arch.wasm64,
+ builtin.Arch.renderscript64,
+ builtin.Arch.amdgcn,
+ builtin.Arch.bpfel,
+ builtin.Arch.bpfeb,
+ builtin.Arch.sparcv9,
+ builtin.Arch.s390x,
+ => return 64,
+ }
+ }
+
+ pub fn getFloatAbi(self: Target) FloatAbi {
+ return switch (self.getEnviron()) {
+ builtin.Environ.gnueabihf,
+ builtin.Environ.eabihf,
+ builtin.Environ.musleabihf,
+ => FloatAbi.Hard,
+ else => FloatAbi.Soft,
+ };
+ }
+
+ pub fn getDynamicLinkerPath(self: Target) ?[]const u8 {
+ const env = self.getEnviron();
+ const arch = self.getArch();
+ switch (env) {
+ builtin.Environ.android => {
+ if (self.is64bit()) {
+ return "/system/bin/linker64";
+ } else {
+ return "/system/bin/linker";
+ }
+ },
+ builtin.Environ.gnux32 => {
+ if (arch == builtin.Arch.x86_64) {
+ return "/libx32/ld-linux-x32.so.2";
+ }
+ },
+ builtin.Environ.musl,
+ builtin.Environ.musleabi,
+ builtin.Environ.musleabihf,
+ => {
+ if (arch == builtin.Arch.x86_64) {
+ return "/lib/ld-musl-x86_64.so.1";
+ }
+ },
+ else => {},
+ }
+ switch (arch) {
+ builtin.Arch.i386,
+ builtin.Arch.sparc,
+ builtin.Arch.sparcel,
+ => return "/lib/ld-linux.so.2",
+
+ builtin.Arch.aarch64 => return "/lib/ld-linux-aarch64.so.1",
+ builtin.Arch.aarch64_be => return "/lib/ld-linux-aarch64_be.so.1",
+
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ builtin.Arch.thumb,
+ => return switch (self.getFloatAbi()) {
+ FloatAbi.Hard => return "/lib/ld-linux-armhf.so.3",
+ else => return "/lib/ld-linux.so.3",
+ },
+
+ builtin.Arch.armebv8_3a,
+ builtin.Arch.armebv8_2a,
+ builtin.Arch.armebv8_1a,
+ builtin.Arch.armebv8,
+ builtin.Arch.armebv8r,
+ builtin.Arch.armebv8m_baseline,
+ builtin.Arch.armebv8m_mainline,
+ builtin.Arch.armebv7,
+ builtin.Arch.armebv7em,
+ builtin.Arch.armebv7m,
+ builtin.Arch.armebv7s,
+ builtin.Arch.armebv7k,
+ builtin.Arch.armebv7ve,
+ builtin.Arch.armebv6,
+ builtin.Arch.armebv6m,
+ builtin.Arch.armebv6k,
+ builtin.Arch.armebv6t2,
+ builtin.Arch.armebv5,
+ builtin.Arch.armebv5te,
+ builtin.Arch.armebv4t,
+ builtin.Arch.thumbeb,
+ => return switch (self.getFloatAbi()) {
+ FloatAbi.Hard => return "/lib/ld-linux-armhf.so.3",
+ else => return "/lib/ld-linux.so.3",
+ },
+
+ builtin.Arch.mips,
+ builtin.Arch.mipsel,
+ builtin.Arch.mips64,
+ builtin.Arch.mips64el,
+ => return null,
+
+ builtin.Arch.powerpc => return "/lib/ld.so.1",
+ builtin.Arch.powerpc64 => return "/lib64/ld64.so.2",
+ builtin.Arch.powerpc64le => return "/lib64/ld64.so.2",
+ builtin.Arch.s390x => return "/lib64/ld64.so.1",
+ builtin.Arch.sparcv9 => return "/lib64/ld-linux.so.2",
+ builtin.Arch.x86_64 => return "/lib64/ld-linux-x86-64.so.2",
+
+ builtin.Arch.arc,
+ builtin.Arch.avr,
+ builtin.Arch.bpfel,
+ builtin.Arch.bpfeb,
+ builtin.Arch.hexagon,
+ builtin.Arch.msp430,
+ builtin.Arch.nios2,
+ builtin.Arch.r600,
+ builtin.Arch.amdgcn,
+ builtin.Arch.riscv32,
+ builtin.Arch.riscv64,
+ builtin.Arch.tce,
+ builtin.Arch.tcele,
+ builtin.Arch.xcore,
+ builtin.Arch.nvptx,
+ builtin.Arch.nvptx64,
+ builtin.Arch.le32,
+ builtin.Arch.le64,
+ builtin.Arch.amdil,
+ builtin.Arch.amdil64,
+ builtin.Arch.hsail,
+ builtin.Arch.hsail64,
+ builtin.Arch.spir,
+ builtin.Arch.spir64,
+ builtin.Arch.kalimbav3,
+ builtin.Arch.kalimbav4,
+ builtin.Arch.kalimbav5,
+ builtin.Arch.shave,
+ builtin.Arch.lanai,
+ builtin.Arch.wasm32,
+ builtin.Arch.wasm64,
+ builtin.Arch.renderscript32,
+ builtin.Arch.renderscript64,
+ => return null,
+ }
+ }
+
+ pub fn llvmTargetFromTriple(triple: std.Buffer) !llvm.TargetRef {
+ var result: llvm.TargetRef = undefined;
+ var err_msg: [*]u8 = undefined;
+ if (llvm.GetTargetFromTriple(triple.ptr(), &result, &err_msg) != 0) {
+ std.debug.warn("triple: {s} error: {s}\n", triple.ptr(), err_msg);
+ return error.UnsupportedTarget;
+ }
+ return result;
+ }
+
+ pub fn cIntTypeSizeInBits(self: Target, id: CInt.Id) u32 {
+ const arch = self.getArch();
+ switch (self.getOs()) {
+ builtin.Os.freestanding => switch (self.getArch()) {
+ builtin.Arch.msp430 => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 16,
+ CInt.Id.Long,
+ CInt.Id.ULong,
+ => return 32,
+ CInt.Id.LongLong,
+ CInt.Id.ULongLong,
+ => return 64,
+ },
+ else => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ => return 16,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 32,
+ CInt.Id.Long,
+ CInt.Id.ULong,
+ => return self.getArchPtrBitWidth(),
+ CInt.Id.LongLong,
+ CInt.Id.ULongLong,
+ => return 64,
+ },
+ },
+
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ builtin.Os.openbsd,
+ builtin.Os.zen,
+ => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ => return 16,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 32,
+ CInt.Id.Long,
+ CInt.Id.ULong,
+ => return self.getArchPtrBitWidth(),
+ CInt.Id.LongLong,
+ CInt.Id.ULongLong,
+ => return 64,
+ },
+
+ builtin.Os.windows => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ => return 16,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 32,
+ CInt.Id.Long,
+ CInt.Id.ULong,
+ CInt.Id.LongLong,
+ CInt.Id.ULongLong,
+ => return 64,
+ },
+
+ builtin.Os.ananas,
+ builtin.Os.cloudabi,
+ builtin.Os.dragonfly,
+ builtin.Os.freebsd,
+ builtin.Os.fuchsia,
+ builtin.Os.ios,
+ builtin.Os.kfreebsd,
+ builtin.Os.lv2,
+ builtin.Os.netbsd,
+ builtin.Os.solaris,
+ builtin.Os.haiku,
+ builtin.Os.minix,
+ builtin.Os.rtems,
+ builtin.Os.nacl,
+ builtin.Os.cnk,
+ builtin.Os.aix,
+ builtin.Os.cuda,
+ builtin.Os.nvcl,
+ builtin.Os.amdhsa,
+ builtin.Os.ps4,
+ builtin.Os.elfiamcu,
+ builtin.Os.tvos,
+ builtin.Os.watchos,
+ builtin.Os.mesa3d,
+ builtin.Os.contiki,
+ builtin.Os.amdpal,
+ => @panic("TODO specify the C integer type sizes for this OS"),
+ }
+ }
+
+ pub fn getDarwinArchString(self: Target) []const u8 {
+ const arch = self.getArch();
+ switch (arch) {
+ builtin.Arch.aarch64 => return "arm64",
+ builtin.Arch.thumb,
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ => return "arm",
+ builtin.Arch.powerpc => return "ppc",
+ builtin.Arch.powerpc64 => return "ppc64",
+ builtin.Arch.powerpc64le => return "ppc64le",
+ else => return @tagName(arch),
+ }
+ }
+};
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
new file mode 100644
index 0000000000..47e45d1bb0
--- /dev/null
+++ b/src-self-hosted/test.zig
@@ -0,0 +1,243 @@
+const std = @import("std");
+const mem = std.mem;
+const builtin = @import("builtin");
+const Target = @import("target.zig").Target;
+const Compilation = @import("compilation.zig").Compilation;
+const introspect = @import("introspect.zig");
+const assertOrPanic = std.debug.assertOrPanic;
+const errmsg = @import("errmsg.zig");
+const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
+
+var ctx: TestContext = undefined;
+
+test "stage2" {
+ try ctx.init();
+ defer ctx.deinit();
+
+ try @import("../test/stage2/compile_errors.zig").addCases(&ctx);
+ try @import("../test/stage2/compare_output.zig").addCases(&ctx);
+
+ try ctx.run();
+}
+
+const file1 = "1.zig";
+const allocator = std.heap.c_allocator;
+
+pub const TestContext = struct {
+ loop: std.event.Loop,
+ event_loop_local: EventLoopLocal,
+ zig_lib_dir: []u8,
+ file_index: std.atomic.Int(usize),
+ group: std.event.Group(error!void),
+ any_err: error!void,
+
+ const tmp_dir_name = "stage2_test_tmp";
+
+ fn init(self: *TestContext) !void {
+ self.* = TestContext{
+ .any_err = {},
+ .loop = undefined,
+ .event_loop_local = undefined,
+ .zig_lib_dir = undefined,
+ .group = undefined,
+ .file_index = std.atomic.Int(usize).init(0),
+ };
+
+ try self.loop.initMultiThreaded(allocator);
+ errdefer self.loop.deinit();
+
+ self.event_loop_local = try EventLoopLocal.init(&self.loop);
+ errdefer self.event_loop_local.deinit();
+
+ self.group = std.event.Group(error!void).init(&self.loop);
+ errdefer self.group.cancelAll();
+
+ self.zig_lib_dir = try introspect.resolveZigLibDir(allocator);
+ errdefer allocator.free(self.zig_lib_dir);
+
+ try std.os.makePath(allocator, tmp_dir_name);
+ errdefer std.os.deleteTree(allocator, tmp_dir_name) catch {};
+ }
+
+ fn deinit(self: *TestContext) void {
+ std.os.deleteTree(allocator, tmp_dir_name) catch {};
+ allocator.free(self.zig_lib_dir);
+ self.event_loop_local.deinit();
+ self.loop.deinit();
+ }
+
+ fn run(self: *TestContext) !void {
+ const handle = try self.loop.call(waitForGroup, self);
+ defer cancel handle;
+ self.loop.run();
+ return self.any_err;
+ }
+
+ async fn waitForGroup(self: *TestContext) void {
+ self.any_err = await (async self.group.wait() catch unreachable);
+ }
+
+ fn testCompileError(
+ self: *TestContext,
+ source: []const u8,
+ path: []const u8,
+ line: usize,
+ column: usize,
+ msg: []const u8,
+ ) !void {
+ var file_index_buf: [20]u8 = undefined;
+ const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.incr());
+ const file1_path = try std.os.path.join(allocator, tmp_dir_name, file_index, file1);
+
+ if (std.os.path.dirname(file1_path)) |dirname| {
+ try std.os.makePath(allocator, dirname);
+ }
+
+ // TODO async I/O
+ try std.io.writeFile(allocator, file1_path, source);
+
+ var comp = try Compilation.create(
+ &self.event_loop_local,
+ "test",
+ file1_path,
+ Target.Native,
+ Compilation.Kind.Obj,
+ builtin.Mode.Debug,
+ true, // is_static
+ self.zig_lib_dir,
+ );
+ errdefer comp.destroy();
+
+ try comp.build();
+
+ try self.group.call(getModuleEvent, comp, source, path, line, column, msg);
+ }
+
+ fn testCompareOutputLibC(
+ self: *TestContext,
+ source: []const u8,
+ expected_output: []const u8,
+ ) !void {
+ var file_index_buf: [20]u8 = undefined;
+ const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.incr());
+ const file1_path = try std.os.path.join(allocator, tmp_dir_name, file_index, file1);
+
+ const output_file = try std.fmt.allocPrint(allocator, "{}-out{}", file1_path, Target(Target.Native).exeFileExt());
+ if (std.os.path.dirname(file1_path)) |dirname| {
+ try std.os.makePath(allocator, dirname);
+ }
+
+ // TODO async I/O
+ try std.io.writeFile(allocator, file1_path, source);
+
+ var comp = try Compilation.create(
+ &self.event_loop_local,
+ "test",
+ file1_path,
+ Target.Native,
+ Compilation.Kind.Exe,
+ builtin.Mode.Debug,
+ false,
+ self.zig_lib_dir,
+ );
+ errdefer comp.destroy();
+
+ _ = try comp.addLinkLib("c", true);
+ comp.link_out_file = output_file;
+ try comp.build();
+
+ try self.group.call(getModuleEventSuccess, comp, output_file, expected_output);
+ }
+
+ async fn getModuleEventSuccess(
+ comp: *Compilation,
+ exe_file: []const u8,
+ expected_output: []const u8,
+ ) !void {
+ // TODO this should not be necessary
+ const exe_file_2 = try std.mem.dupe(allocator, u8, exe_file);
+
+ defer comp.destroy();
+ const build_event = await (async comp.events.get() catch unreachable);
+
+ switch (build_event) {
+ Compilation.Event.Ok => {
+ const argv = []const []const u8{exe_file_2};
+ // TODO use event loop
+ const child = try std.os.ChildProcess.exec(allocator, argv, null, null, 1024 * 1024);
+ switch (child.term) {
+ std.os.ChildProcess.Term.Exited => |code| {
+ if (code != 0) {
+ return error.BadReturnCode;
+ }
+ },
+ else => {
+ return error.Crashed;
+ },
+ }
+ if (!mem.eql(u8, child.stdout, expected_output)) {
+ return error.OutputMismatch;
+ }
+ },
+ Compilation.Event.Error => |err| return err,
+ Compilation.Event.Fail => |msgs| {
+ var stderr = try std.io.getStdErr();
+ try stderr.write("build incorrectly failed:\n");
+ for (msgs) |msg| {
+ defer msg.destroy();
+ try msg.printToFile(&stderr, errmsg.Color.Auto);
+ }
+ },
+ }
+ }
+
+ async fn getModuleEvent(
+ comp: *Compilation,
+ source: []const u8,
+ path: []const u8,
+ line: usize,
+ column: usize,
+ text: []const u8,
+ ) !void {
+ defer comp.destroy();
+ const build_event = await (async comp.events.get() catch unreachable);
+
+ switch (build_event) {
+ Compilation.Event.Ok => {
+ @panic("build incorrectly succeeded");
+ },
+ Compilation.Event.Error => |err| {
+ @panic("build incorrectly failed");
+ },
+ Compilation.Event.Fail => |msgs| {
+ assertOrPanic(msgs.len != 0);
+ for (msgs) |msg| {
+ if (mem.endsWith(u8, msg.getRealPath(), path) and mem.eql(u8, msg.text, text)) {
+ const first_token = msg.getTree().tokens.at(msg.span.first);
+ const last_token = msg.getTree().tokens.at(msg.span.first);
+ const start_loc = msg.getTree().tokenLocationPtr(0, first_token);
+ if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
+ return;
+ }
+ }
+ }
+ std.debug.warn(
+ "\n=====source:=======\n{}\n====expected:========\n{}:{}:{}: error: {}\n",
+ source,
+ path,
+ line,
+ column,
+ text,
+ );
+ std.debug.warn("\n====found:========\n");
+ var stderr = try std.io.getStdErr();
+ for (msgs) |msg| {
+ defer msg.destroy();
+ try msg.printToFile(&stderr, errmsg.Color.Auto);
+ }
+ std.debug.warn("============\n");
+ return error.TestFailed;
+ },
+ }
+ }
+};
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
new file mode 100644
index 0000000000..6783130fc7
--- /dev/null
+++ b/src-self-hosted/type.zig
@@ -0,0 +1,1101 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Scope = @import("scope.zig").Scope;
+const Compilation = @import("compilation.zig").Compilation;
+const Value = @import("value.zig").Value;
+const llvm = @import("llvm.zig");
+const event = std.event;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+pub const Type = struct {
+ base: Value,
+ id: Id,
+ name: []const u8,
+ abi_alignment: AbiAlignment,
+
+ pub const AbiAlignment = event.Future(error{OutOfMemory}!u32);
+
+ pub const Id = builtin.TypeId;
+
+ pub fn destroy(base: *Type, comp: *Compilation) void {
+ switch (base.id) {
+ Id.Struct => @fieldParentPtr(Struct, "base", base).destroy(comp),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
+ Id.Type => @fieldParentPtr(MetaType, "base", base).destroy(comp),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
+ Id.Int => @fieldParentPtr(Int, "base", base).destroy(comp),
+ Id.Float => @fieldParentPtr(Float, "base", base).destroy(comp),
+ Id.Pointer => @fieldParentPtr(Pointer, "base", base).destroy(comp),
+ Id.Array => @fieldParentPtr(Array, "base", base).destroy(comp),
+ Id.ComptimeFloat => @fieldParentPtr(ComptimeFloat, "base", base).destroy(comp),
+ Id.ComptimeInt => @fieldParentPtr(ComptimeInt, "base", base).destroy(comp),
+ Id.Undefined => @fieldParentPtr(Undefined, "base", base).destroy(comp),
+ Id.Null => @fieldParentPtr(Null, "base", base).destroy(comp),
+ Id.Optional => @fieldParentPtr(Optional, "base", base).destroy(comp),
+ Id.ErrorUnion => @fieldParentPtr(ErrorUnion, "base", base).destroy(comp),
+ Id.ErrorSet => @fieldParentPtr(ErrorSet, "base", base).destroy(comp),
+ Id.Enum => @fieldParentPtr(Enum, "base", base).destroy(comp),
+ Id.Union => @fieldParentPtr(Union, "base", base).destroy(comp),
+ Id.Namespace => @fieldParentPtr(Namespace, "base", base).destroy(comp),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
+ Id.BoundFn => @fieldParentPtr(BoundFn, "base", base).destroy(comp),
+ Id.ArgTuple => @fieldParentPtr(ArgTuple, "base", base).destroy(comp),
+ Id.Opaque => @fieldParentPtr(Opaque, "base", base).destroy(comp),
+ Id.Promise => @fieldParentPtr(Promise, "base", base).destroy(comp),
+ }
+ }
+
+ pub fn getLlvmType(
+ base: *Type,
+ allocator: *Allocator,
+ llvm_context: llvm.ContextRef,
+ ) (error{OutOfMemory}!llvm.TypeRef) {
+ switch (base.id) {
+ Id.Struct => return @fieldParentPtr(Struct, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Type => unreachable,
+ Id.Void => unreachable,
+ Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmType(allocator, llvm_context),
+ Id.NoReturn => unreachable,
+ Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Float => return @fieldParentPtr(Float, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Pointer => return @fieldParentPtr(Pointer, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ComptimeFloat => unreachable,
+ Id.ComptimeInt => unreachable,
+ Id.Undefined => unreachable,
+ Id.Null => unreachable,
+ Id.Optional => return @fieldParentPtr(Optional, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ErrorUnion => return @fieldParentPtr(ErrorUnion, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ErrorSet => return @fieldParentPtr(ErrorSet, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Enum => return @fieldParentPtr(Enum, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Union => return @fieldParentPtr(Union, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Namespace => unreachable,
+ Id.Block => unreachable,
+ Id.BoundFn => return @fieldParentPtr(BoundFn, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ArgTuple => unreachable,
+ Id.Opaque => return @fieldParentPtr(Opaque, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Promise => return @fieldParentPtr(Promise, "base", base).getLlvmType(allocator, llvm_context),
+ }
+ }
+
+ pub fn handleIsPtr(base: *Type) bool {
+ switch (base.id) {
+ Id.Type,
+ Id.ComptimeFloat,
+ Id.ComptimeInt,
+ Id.Undefined,
+ Id.Null,
+ Id.Namespace,
+ Id.Block,
+ Id.BoundFn,
+ Id.ArgTuple,
+ Id.Opaque,
+ => unreachable,
+
+ Id.NoReturn,
+ Id.Void,
+ Id.Bool,
+ Id.Int,
+ Id.Float,
+ Id.Pointer,
+ Id.ErrorSet,
+ Id.Enum,
+ Id.Fn,
+ Id.Promise,
+ => return false,
+
+ Id.Struct => @panic("TODO"),
+ Id.Array => @panic("TODO"),
+ Id.Optional => @panic("TODO"),
+ Id.ErrorUnion => @panic("TODO"),
+ Id.Union => @panic("TODO"),
+ }
+ }
+
+ pub fn hasBits(base: *Type) bool {
+ switch (base.id) {
+ Id.Type,
+ Id.ComptimeFloat,
+ Id.ComptimeInt,
+ Id.Undefined,
+ Id.Null,
+ Id.Namespace,
+ Id.Block,
+ Id.BoundFn,
+ Id.ArgTuple,
+ Id.Opaque,
+ => unreachable,
+
+ Id.Void,
+ Id.NoReturn,
+ => return false,
+
+ Id.Bool,
+ Id.Int,
+ Id.Float,
+ Id.Fn,
+ Id.Promise,
+ => return true,
+
+ Id.Pointer => {
+ const ptr_type = @fieldParentPtr(Pointer, "base", base);
+ return ptr_type.key.child_type.hasBits();
+ },
+
+ Id.ErrorSet => @panic("TODO"),
+ Id.Enum => @panic("TODO"),
+ Id.Struct => @panic("TODO"),
+ Id.Array => @panic("TODO"),
+ Id.Optional => @panic("TODO"),
+ Id.ErrorUnion => @panic("TODO"),
+ Id.Union => @panic("TODO"),
+ }
+ }
+
+ pub fn cast(base: *Type, comptime T: type) ?*T {
+ if (base.id != @field(Id, @typeName(T))) return null;
+ return @fieldParentPtr(T, "base", base);
+ }
+
+ pub fn dump(base: *const Type) void {
+ std.debug.warn("{}", @tagName(base.id));
+ }
+
+ fn init(base: *Type, comp: *Compilation, id: Id, name: []const u8) void {
+ base.* = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = id,
+ .name = name,
+ .abi_alignment = AbiAlignment.init(comp.loop),
+ };
+ }
+
+ /// If you happen to have an llvm context handy, use getAbiAlignmentInContext instead.
+ /// Otherwise, this one will grab one from the pool and then release it.
+ pub async fn getAbiAlignment(base: *Type, comp: *Compilation) !u32 {
+ if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*;
+
+ {
+ const held = try comp.event_loop_local.getAnyLlvmContext();
+ defer held.release(comp.event_loop_local);
+
+ const llvm_context = held.node.data;
+
+ base.abi_alignment.data = await (async base.resolveAbiAlignment(comp, llvm_context) catch unreachable);
+ }
+ base.abi_alignment.resolve();
+ return base.abi_alignment.data;
+ }
+
+ /// If you have an llvm context handy, you can use it here.
+ pub async fn getAbiAlignmentInContext(base: *Type, comp: *Compilation, llvm_context: llvm.ContextRef) !u32 {
+ if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*;
+
+ base.abi_alignment.data = await (async base.resolveAbiAlignment(comp, llvm_context) catch unreachable);
+ base.abi_alignment.resolve();
+ return base.abi_alignment.data;
+ }
+
+ /// Lower level function that does the work. See getAbiAlignment.
+ async fn resolveAbiAlignment(base: *Type, comp: *Compilation, llvm_context: llvm.ContextRef) !u32 {
+ const llvm_type = try base.getLlvmType(comp.gpa(), llvm_context);
+ return @intCast(u32, llvm.ABIAlignmentOfType(comp.target_data_ref, llvm_type));
+ }
+
+ pub const Struct = struct {
+ base: Type,
+ decls: *Scope.Decls,
+
+ pub fn destroy(self: *Struct, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Struct, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Fn = struct {
+ base: Type,
+ key: Key,
+ non_key: NonKey,
+ garbage_node: std.atomic.Stack(*Fn).Node,
+
+ pub const Kind = enum {
+ Normal,
+ Generic,
+ };
+
+ pub const NonKey = union {
+ Normal: Normal,
+ Generic: void,
+
+ pub const Normal = struct {
+ variable_list: std.ArrayList(*Scope.Var),
+ };
+ };
+
+ pub const Key = struct {
+ data: Data,
+ alignment: ?u32,
+
+ pub const Data = union(Kind) {
+ Generic: Generic,
+ Normal: Normal,
+ };
+
+ pub const Normal = struct {
+ params: []Param,
+ return_type: *Type,
+ is_var_args: bool,
+ cc: CallingConvention,
+ };
+
+ pub const Generic = struct {
+ param_count: usize,
+ cc: CC,
+
+ pub const CC = union(CallingConvention) {
+ Auto,
+ C,
+ Cold,
+ Naked,
+ Stdcall,
+ Async: *Type, // allocator type
+ };
+ };
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= hashAny(self.alignment, 0);
+ switch (self.data) {
+ Kind.Generic => |generic| {
+ result +%= hashAny(generic.param_count, 1);
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| result +%= hashAny(allocator_type, 2),
+ else => result +%= hashAny(CallingConvention(generic.cc), 3),
+ }
+ },
+ Kind.Normal => |normal| {
+ result +%= hashAny(normal.return_type, 4);
+ result +%= hashAny(normal.is_var_args, 5);
+ result +%= hashAny(normal.cc, 6);
+ for (normal.params) |param| {
+ result +%= hashAny(param.is_noalias, 7);
+ result +%= hashAny(param.typ, 8);
+ }
+ },
+ }
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ if ((self.alignment == null) != (other.alignment == null)) return false;
+ if (self.alignment) |self_align| {
+ if (self_align != other.alignment.?) return false;
+ }
+ if (@TagType(Data)(self.data) != @TagType(Data)(other.data)) return false;
+ switch (self.data) {
+ Kind.Generic => |*self_generic| {
+ const other_generic = &other.data.Generic;
+ if (self_generic.param_count != other_generic.param_count) return false;
+ if (CallingConvention(self_generic.cc) != CallingConvention(other_generic.cc)) return false;
+ switch (self_generic.cc) {
+ CallingConvention.Async => |self_allocator_type| {
+ const other_allocator_type = other_generic.cc.Async;
+ if (self_allocator_type != other_allocator_type) return false;
+ },
+ else => {},
+ }
+ },
+ Kind.Normal => |*self_normal| {
+ const other_normal = &other.data.Normal;
+ if (self_normal.cc != other_normal.cc) return false;
+ if (self_normal.is_var_args != other_normal.is_var_args) return false;
+ if (self_normal.return_type != other_normal.return_type) return false;
+ for (self_normal.params) |*self_param, i| {
+ const other_param = &other_normal.params[i];
+ if (self_param.is_noalias != other_param.is_noalias) return false;
+ if (self_param.typ != other_param.typ) return false;
+ }
+ },
+ }
+ return true;
+ }
+
+ pub fn deref(key: Key, comp: *Compilation) void {
+ switch (key.data) {
+ Kind.Generic => |generic| {
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| allocator_type.base.deref(comp),
+ else => {},
+ }
+ },
+ Kind.Normal => |normal| {
+ normal.return_type.base.deref(comp);
+ for (normal.params) |param| {
+ param.typ.base.deref(comp);
+ }
+ },
+ }
+ }
+
+ pub fn ref(key: Key) void {
+ switch (key.data) {
+ Kind.Generic => |generic| {
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| allocator_type.base.ref(),
+ else => {},
+ }
+ },
+ Kind.Normal => |normal| {
+ normal.return_type.base.ref();
+ for (normal.params) |param| {
+ param.typ.base.ref();
+ }
+ },
+ }
+ }
+ };
+
+ pub const CallingConvention = enum {
+ Auto,
+ C,
+ Cold,
+ Naked,
+ Stdcall,
+ Async,
+ };
+
+ pub const Param = struct {
+ is_noalias: bool,
+ typ: *Type,
+ };
+
+ fn ccFnTypeStr(cc: CallingConvention) []const u8 {
+ return switch (cc) {
+ CallingConvention.Auto => "",
+ CallingConvention.C => "extern ",
+ CallingConvention.Cold => "coldcc ",
+ CallingConvention.Naked => "nakedcc ",
+ CallingConvention.Stdcall => "stdcallcc ",
+ CallingConvention.Async => unreachable,
+ };
+ }
+
+ pub fn paramCount(self: *Fn) usize {
+ return switch (self.key.data) {
+ Kind.Generic => |generic| generic.param_count,
+ Kind.Normal => |normal| normal.params.len,
+ };
+ }
+
+ /// takes ownership of key.Normal.params on success
+ pub async fn get(comp: *Compilation, key: Key) !*Fn {
+ {
+ const held = await (async comp.fn_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ key.ref();
+ errdefer key.deref(comp);
+
+ const self = try comp.gpa().createOne(Fn);
+ self.* = Fn{
+ .base = undefined,
+ .key = key,
+ .non_key = undefined,
+ .garbage_node = undefined,
+ };
+ errdefer comp.gpa().destroy(self);
+
+ var name_buf = try std.Buffer.initSize(comp.gpa(), 0);
+ defer name_buf.deinit();
+
+ const name_stream = &std.io.BufferOutStream.init(&name_buf).stream;
+
+ switch (key.data) {
+ Kind.Generic => |generic| {
+ self.non_key = NonKey{ .Generic = {} };
+ switch (generic.cc) {
+ CallingConvention.Async => |async_allocator_type| {
+ try name_stream.print("async<{}> ", async_allocator_type.name);
+ },
+ else => {
+ const cc_str = ccFnTypeStr(generic.cc);
+ try name_stream.write(cc_str);
+ },
+ }
+ try name_stream.write("fn(");
+ var param_i: usize = 0;
+ while (param_i < generic.param_count) : (param_i += 1) {
+ const arg = if (param_i == 0) "var" else ", var";
+ try name_stream.write(arg);
+ }
+ try name_stream.write(")");
+ if (key.alignment) |alignment| {
+ try name_stream.print(" align<{}>", alignment);
+ }
+ try name_stream.write(" var");
+ },
+ Kind.Normal => |normal| {
+ self.non_key = NonKey{
+ .Normal = NonKey.Normal{ .variable_list = std.ArrayList(*Scope.Var).init(comp.gpa()) },
+ };
+ const cc_str = ccFnTypeStr(normal.cc);
+ try name_stream.print("{}fn(", cc_str);
+ for (normal.params) |param, i| {
+ if (i != 0) try name_stream.write(", ");
+ if (param.is_noalias) try name_stream.write("noalias ");
+ try name_stream.write(param.typ.name);
+ }
+ if (normal.is_var_args) {
+ if (normal.params.len != 0) try name_stream.write(", ");
+ try name_stream.write("...");
+ }
+ try name_stream.write(")");
+ if (key.alignment) |alignment| {
+ try name_stream.print(" align<{}>", alignment);
+ }
+ try name_stream.print(" {}", normal.return_type.name);
+ },
+ }
+
+ self.base.init(comp, Id.Fn, name_buf.toOwnedSlice());
+
+ {
+ const held = await (async comp.fn_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn destroy(self: *Fn, comp: *Compilation) void {
+ self.key.deref(comp);
+ switch (self.key.data) {
+ Kind.Generic => {},
+ Kind.Normal => {
+ self.non_key.Normal.variable_list.deinit();
+ },
+ }
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Fn, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ const normal = &self.key.data.Normal;
+ const llvm_return_type = switch (normal.return_type.id) {
+ Type.Id.Void => llvm.VoidTypeInContext(llvm_context) orelse return error.OutOfMemory,
+ else => try normal.return_type.getLlvmType(allocator, llvm_context),
+ };
+ const llvm_param_types = try allocator.alloc(llvm.TypeRef, normal.params.len);
+ defer allocator.free(llvm_param_types);
+ for (llvm_param_types) |*llvm_param_type, i| {
+ llvm_param_type.* = try normal.params[i].typ.getLlvmType(allocator, llvm_context);
+ }
+
+ return llvm.FunctionType(
+ llvm_return_type,
+ llvm_param_types.ptr,
+ @intCast(c_uint, llvm_param_types.len),
+ @boolToInt(normal.is_var_args),
+ ) orelse error.OutOfMemory;
+ }
+ };
+
+ pub const MetaType = struct {
+ base: Type,
+ value: *Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *MetaType {
+ comp.meta_type.base.base.ref();
+ return comp.meta_type;
+ }
+
+ pub fn destroy(self: *MetaType, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Void = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *Void {
+ comp.void_type.base.base.ref();
+ return comp.void_type;
+ }
+
+ pub fn destroy(self: *Void, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Bool = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *Bool {
+ comp.bool_type.base.base.ref();
+ return comp.bool_type;
+ }
+
+ pub fn destroy(self: *Bool, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Bool, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const NoReturn = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *NoReturn {
+ comp.noreturn_type.base.base.ref();
+ return comp.noreturn_type;
+ }
+
+ pub fn destroy(self: *NoReturn, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Int = struct {
+ base: Type,
+ key: Key,
+ garbage_node: std.atomic.Stack(*Int).Node,
+
+ pub const Key = struct {
+ bit_count: u32,
+ is_signed: bool,
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= hashAny(self.is_signed, 0);
+ result +%= hashAny(self.bit_count, 1);
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ return self.bit_count == other.bit_count and self.is_signed == other.is_signed;
+ }
+ };
+
+ pub fn get_u8(comp: *Compilation) *Int {
+ comp.u8_type.base.base.ref();
+ return comp.u8_type;
+ }
+
+ pub async fn get(comp: *Compilation, key: Key) !*Int {
+ {
+ const held = await (async comp.int_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ const self = try comp.gpa().create(Int{
+ .base = undefined,
+ .key = key,
+ .garbage_node = undefined,
+ });
+ errdefer comp.gpa().destroy(self);
+
+ const u_or_i = "ui"[@boolToInt(key.is_signed)];
+ const name = try std.fmt.allocPrint(comp.gpa(), "{c}{}", u_or_i, key.bit_count);
+ errdefer comp.gpa().free(name);
+
+ self.base.init(comp, Id.Int, name);
+
+ {
+ const held = await (async comp.int_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn destroy(self: *Int, comp: *Compilation) void {
+ self.garbage_node = std.atomic.Stack(*Int).Node{
+ .data = self,
+ .next = undefined,
+ };
+ comp.registerGarbage(Int, &self.garbage_node);
+ }
+
+ pub async fn gcDestroy(self: *Int, comp: *Compilation) void {
+ {
+ const held = await (async comp.int_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = held.value.remove(&self.key).?;
+ }
+ // we allocated the name
+ comp.gpa().free(self.base.name);
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Int, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ return llvm.IntTypeInContext(llvm_context, self.key.bit_count) orelse return error.OutOfMemory;
+ }
+ };
+
+ pub const Float = struct {
+ base: Type,
+
+ pub fn destroy(self: *Float, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Float, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+ pub const Pointer = struct {
+ base: Type,
+ key: Key,
+ garbage_node: std.atomic.Stack(*Pointer).Node,
+
+ pub const Key = struct {
+ child_type: *Type,
+ mut: Mut,
+ vol: Vol,
+ size: Size,
+ alignment: Align,
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= switch (self.alignment) {
+ Align.Abi => 0xf201c090,
+ Align.Override => |x| hashAny(x, 0),
+ };
+ result +%= hashAny(self.child_type, 1);
+ result +%= hashAny(self.mut, 2);
+ result +%= hashAny(self.vol, 3);
+ result +%= hashAny(self.size, 4);
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ if (self.child_type != other.child_type or
+ self.mut != other.mut or
+ self.vol != other.vol or
+ self.size != other.size or
+ @TagType(Align)(self.alignment) != @TagType(Align)(other.alignment))
+ {
+ return false;
+ }
+ switch (self.alignment) {
+ Align.Abi => return true,
+ Align.Override => |x| return x == other.alignment.Override,
+ }
+ }
+ };
+
+ pub const Mut = enum {
+ Mut,
+ Const,
+ };
+
+ pub const Vol = enum {
+ Non,
+ Volatile,
+ };
+
+ pub const Align = union(enum) {
+ Abi,
+ Override: u32,
+ };
+
+ pub const Size = builtin.TypeInfo.Pointer.Size;
+
+ pub fn destroy(self: *Pointer, comp: *Compilation) void {
+ self.garbage_node = std.atomic.Stack(*Pointer).Node{
+ .data = self,
+ .next = undefined,
+ };
+ comp.registerGarbage(Pointer, &self.garbage_node);
+ }
+
+ pub async fn gcDestroy(self: *Pointer, comp: *Compilation) void {
+ {
+ const held = await (async comp.ptr_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = held.value.remove(&self.key).?;
+ }
+ self.key.child_type.base.deref(comp);
+ comp.gpa().destroy(self);
+ }
+
+ pub async fn getAlignAsInt(self: *Pointer, comp: *Compilation) u32 {
+ switch (self.key.alignment) {
+ Align.Abi => return await (async self.key.child_type.getAbiAlignment(comp) catch unreachable),
+ Align.Override => |alignment| return alignment,
+ }
+ }
+
+ pub async fn get(
+ comp: *Compilation,
+ key: Key,
+ ) !*Pointer {
+ var normal_key = key;
+ switch (key.alignment) {
+ Align.Abi => {},
+ Align.Override => |alignment| {
+ const abi_align = try await (async key.child_type.getAbiAlignment(comp) catch unreachable);
+ if (abi_align == alignment) {
+ normal_key.alignment = Align.Abi;
+ }
+ },
+ }
+ {
+ const held = await (async comp.ptr_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&normal_key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ const self = try comp.gpa().create(Pointer{
+ .base = undefined,
+ .key = normal_key,
+ .garbage_node = undefined,
+ });
+ errdefer comp.gpa().destroy(self);
+
+ const size_str = switch (self.key.size) {
+ Size.One => "*",
+ Size.Many => "[*]",
+ Size.Slice => "[]",
+ };
+ const mut_str = switch (self.key.mut) {
+ Mut.Const => "const ",
+ Mut.Mut => "",
+ };
+ const vol_str = switch (self.key.vol) {
+ Vol.Volatile => "volatile ",
+ Vol.Non => "",
+ };
+ const name = switch (self.key.alignment) {
+ Align.Abi => try std.fmt.allocPrint(
+ comp.gpa(),
+ "{}{}{}{}",
+ size_str,
+ mut_str,
+ vol_str,
+ self.key.child_type.name,
+ ),
+ Align.Override => |alignment| try std.fmt.allocPrint(
+ comp.gpa(),
+ "{}align<{}> {}{}{}",
+ size_str,
+ alignment,
+ mut_str,
+ vol_str,
+ self.key.child_type.name,
+ ),
+ };
+ errdefer comp.gpa().free(name);
+
+ self.base.init(comp, Id.Pointer, name);
+
+ {
+ const held = await (async comp.ptr_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn getLlvmType(self: *Pointer, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ const elem_llvm_type = try self.key.child_type.getLlvmType(allocator, llvm_context);
+ return llvm.PointerType(elem_llvm_type, 0) orelse return error.OutOfMemory;
+ }
+ };
+
+ pub const Array = struct {
+ base: Type,
+ key: Key,
+ garbage_node: std.atomic.Stack(*Array).Node,
+
+ pub const Key = struct {
+ elem_type: *Type,
+ len: usize,
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= hashAny(self.elem_type, 0);
+ result +%= hashAny(self.len, 1);
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ return self.elem_type == other.elem_type and self.len == other.len;
+ }
+ };
+
+ pub fn destroy(self: *Array, comp: *Compilation) void {
+ self.key.elem_type.base.deref(comp);
+ comp.gpa().destroy(self);
+ }
+
+ pub async fn get(comp: *Compilation, key: Key) !*Array {
+ key.elem_type.base.ref();
+ errdefer key.elem_type.base.deref(comp);
+
+ {
+ const held = await (async comp.array_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ const self = try comp.gpa().create(Array{
+ .base = undefined,
+ .key = key,
+ .garbage_node = undefined,
+ });
+ errdefer comp.gpa().destroy(self);
+
+ const name = try std.fmt.allocPrint(comp.gpa(), "[{}]{}", key.len, key.elem_type.name);
+ errdefer comp.gpa().free(name);
+
+ self.base.init(comp, Id.Array, name);
+
+ {
+ const held = await (async comp.array_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn getLlvmType(self: *Array, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ const elem_llvm_type = try self.key.elem_type.getLlvmType(allocator, llvm_context);
+ return llvm.ArrayType(elem_llvm_type, @intCast(c_uint, self.key.len)) orelse return error.OutOfMemory;
+ }
+ };
+
+ pub const ComptimeFloat = struct {
+ base: Type,
+
+ pub fn destroy(self: *ComptimeFloat, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const ComptimeInt = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *ComptimeInt {
+ comp.comptime_int_type.base.base.ref();
+ return comp.comptime_int_type;
+ }
+
+ pub fn destroy(self: *ComptimeInt, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Undefined = struct {
+ base: Type,
+
+ pub fn destroy(self: *Undefined, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Null = struct {
+ base: Type,
+
+ pub fn destroy(self: *Null, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Optional = struct {
+ base: Type,
+
+ pub fn destroy(self: *Optional, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Optional, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const ErrorUnion = struct {
+ base: Type,
+
+ pub fn destroy(self: *ErrorUnion, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *ErrorUnion, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const ErrorSet = struct {
+ base: Type,
+
+ pub fn destroy(self: *ErrorSet, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *ErrorSet, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Enum = struct {
+ base: Type,
+
+ pub fn destroy(self: *Enum, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Enum, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Union = struct {
+ base: Type,
+
+ pub fn destroy(self: *Union, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Union, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Namespace = struct {
+ base: Type,
+
+ pub fn destroy(self: *Namespace, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Block = struct {
+ base: Type,
+
+ pub fn destroy(self: *Block, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const BoundFn = struct {
+ base: Type,
+
+ pub fn destroy(self: *BoundFn, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *BoundFn, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const ArgTuple = struct {
+ base: Type,
+
+ pub fn destroy(self: *ArgTuple, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Opaque = struct {
+ base: Type,
+
+ pub fn destroy(self: *Opaque, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Opaque, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Promise = struct {
+ base: Type,
+
+ pub fn destroy(self: *Promise, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Promise, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+};
+
+fn hashAny(x: var, comptime seed: u64) u32 {
+ switch (@typeInfo(@typeOf(x))) {
+ builtin.TypeId.Int => |info| {
+ comptime var rng = comptime std.rand.DefaultPrng.init(seed);
+ const unsigned_x = @bitCast(@IntType(false, info.bits), x);
+ if (info.bits <= 32) {
+ return u32(unsigned_x) *% comptime rng.random.scalar(u32);
+ } else {
+ return @truncate(u32, unsigned_x *% comptime rng.random.scalar(@typeOf(unsigned_x)));
+ }
+ },
+ builtin.TypeId.Pointer => |info| {
+ switch (info.size) {
+ builtin.TypeInfo.Pointer.Size.One => return hashAny(@ptrToInt(x), seed),
+ builtin.TypeInfo.Pointer.Size.Many => @compileError("implement hash function"),
+ builtin.TypeInfo.Pointer.Size.Slice => @compileError("implement hash function"),
+ }
+ },
+ builtin.TypeId.Enum => return hashAny(@enumToInt(x), seed),
+ builtin.TypeId.Bool => {
+ comptime var rng = comptime std.rand.DefaultPrng.init(seed);
+ const vals = comptime [2]u32{ rng.random.scalar(u32), rng.random.scalar(u32) };
+ return vals[@boolToInt(x)];
+ },
+ builtin.TypeId.Optional => {
+ if (x) |non_opt| {
+ return hashAny(non_opt, seed);
+ } else {
+ return hashAny(u32(1), seed);
+ }
+ },
+ else => @compileError("implement hash function for " ++ @typeName(@typeOf(x))),
+ }
+}
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
new file mode 100644
index 0000000000..e6dca4eff7
--- /dev/null
+++ b/src-self-hosted/value.zig
@@ -0,0 +1,581 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Scope = @import("scope.zig").Scope;
+const Compilation = @import("compilation.zig").Compilation;
+const ObjectFile = @import("codegen.zig").ObjectFile;
+const llvm = @import("llvm.zig");
+const Buffer = std.Buffer;
+const assert = std.debug.assert;
+
+/// Values are ref-counted, heap-allocated, and copy-on-write
+/// If there is only 1 ref then write need not copy
+pub const Value = struct {
+ id: Id,
+ typ: *Type,
+ ref_count: std.atomic.Int(usize),
+
+ /// Thread-safe
+ pub fn ref(base: *Value) void {
+ _ = base.ref_count.incr();
+ }
+
+ /// Thread-safe
+ pub fn deref(base: *Value, comp: *Compilation) void {
+ if (base.ref_count.decr() == 1) {
+ base.typ.base.deref(comp);
+ switch (base.id) {
+ Id.Type => @fieldParentPtr(Type, "base", base).destroy(comp),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
+ Id.FnProto => @fieldParentPtr(FnProto, "base", base).destroy(comp),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
+ Id.Ptr => @fieldParentPtr(Ptr, "base", base).destroy(comp),
+ Id.Int => @fieldParentPtr(Int, "base", base).destroy(comp),
+ Id.Array => @fieldParentPtr(Array, "base", base).destroy(comp),
+ }
+ }
+ }
+
+ pub fn setType(base: *Value, new_type: *Type, comp: *Compilation) void {
+ base.typ.base.deref(comp);
+ new_type.base.ref();
+ base.typ = new_type;
+ }
+
+ pub fn getRef(base: *Value) *Value {
+ base.ref();
+ return base;
+ }
+
+ pub fn cast(base: *Value, comptime T: type) ?*T {
+ if (base.id != @field(Id, @typeName(T))) return null;
+ return @fieldParentPtr(T, "base", base);
+ }
+
+ pub fn dump(base: *const Value) void {
+ std.debug.warn("{}", @tagName(base.id));
+ }
+
+ pub fn getLlvmConst(base: *Value, ofile: *ObjectFile) (error{OutOfMemory}!?llvm.ValueRef) {
+ switch (base.id) {
+ Id.Type => unreachable,
+ Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmConst(ofile),
+ Id.FnProto => return @fieldParentPtr(FnProto, "base", base).getLlvmConst(ofile),
+ Id.Void => return null,
+ Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmConst(ofile),
+ Id.NoReturn => unreachable,
+ Id.Ptr => return @fieldParentPtr(Ptr, "base", base).getLlvmConst(ofile),
+ Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmConst(ofile),
+ Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmConst(ofile),
+ }
+ }
+
+ pub fn derefAndCopy(self: *Value, comp: *Compilation) (error{OutOfMemory}!*Value) {
+ if (self.ref_count.get() == 1) {
+ // ( ͡° ͜ʖ ͡°)
+ return self;
+ }
+
+ assert(self.ref_count.decr() != 1);
+ return self.copy(comp);
+ }
+
+ pub fn copy(base: *Value, comp: *Compilation) (error{OutOfMemory}!*Value) {
+ switch (base.id) {
+ Id.Type => unreachable,
+ Id.Fn => unreachable,
+ Id.FnProto => unreachable,
+ Id.Void => unreachable,
+ Id.Bool => unreachable,
+ Id.NoReturn => unreachable,
+ Id.Ptr => unreachable,
+ Id.Array => unreachable,
+ Id.Int => return &(try @fieldParentPtr(Int, "base", base).copy(comp)).base,
+ }
+ }
+
+ pub const Parent = union(enum) {
+ None,
+ BaseStruct: BaseStruct,
+ BaseArray: BaseArray,
+ BaseUnion: *Value,
+ BaseScalar: *Value,
+
+ pub const BaseStruct = struct {
+ val: *Value,
+ field_index: usize,
+ };
+
+ pub const BaseArray = struct {
+ val: *Value,
+ elem_index: usize,
+ };
+ };
+
+ pub const Id = enum {
+ Type,
+ Fn,
+ Void,
+ Bool,
+ NoReturn,
+ Array,
+ Ptr,
+ Int,
+ FnProto,
+ };
+
+ pub const Type = @import("type.zig").Type;
+
+ pub const FnProto = struct {
+ base: Value,
+
+ /// The main external name that is used in the .o file.
+ /// TODO https://github.com/ziglang/zig/issues/265
+ symbol_name: Buffer,
+
+ pub fn create(comp: *Compilation, fn_type: *Type.Fn, symbol_name: Buffer) !*FnProto {
+ const self = try comp.gpa().create(FnProto{
+ .base = Value{
+ .id = Value.Id.FnProto,
+ .typ = &fn_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .symbol_name = symbol_name,
+ });
+ fn_type.base.base.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *FnProto, comp: *Compilation) void {
+ self.symbol_name.deinit();
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *FnProto, ofile: *ObjectFile) !?llvm.ValueRef {
+ const llvm_fn_type = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ const llvm_fn = llvm.AddFunction(
+ ofile.module,
+ self.symbol_name.ptr(),
+ llvm_fn_type,
+ ) orelse return error.OutOfMemory;
+
+ // TODO port more logic from codegen.cpp:fn_llvm_value
+
+ return llvm_fn;
+ }
+ };
+
+ pub const Fn = struct {
+ base: Value,
+
+ /// The main external name that is used in the .o file.
+ /// TODO https://github.com/ziglang/zig/issues/265
+ symbol_name: Buffer,
+
+ /// parent should be the top level decls or container decls
+ fndef_scope: *Scope.FnDef,
+
+ /// parent is scope for last parameter
+ child_scope: *Scope,
+
+ /// parent is child_scope
+ block_scope: ?*Scope.Block,
+
+ /// Path to the object file that contains this function
+ containing_object: Buffer,
+
+ link_set_node: *std.LinkedList(?*Value.Fn).Node,
+
+ /// Creates a Fn value with 1 ref
+ /// Takes ownership of symbol_name
+ pub fn create(comp: *Compilation, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: Buffer) !*Fn {
+ const link_set_node = try comp.gpa().create(Compilation.FnLinkSet.Node{
+ .data = null,
+ .next = undefined,
+ .prev = undefined,
+ });
+ errdefer comp.gpa().destroy(link_set_node);
+
+ const self = try comp.gpa().create(Fn{
+ .base = Value{
+ .id = Value.Id.Fn,
+ .typ = &fn_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .fndef_scope = fndef_scope,
+ .child_scope = &fndef_scope.base,
+ .block_scope = null,
+ .symbol_name = symbol_name,
+ .containing_object = Buffer.initNull(comp.gpa()),
+ .link_set_node = link_set_node,
+ });
+ fn_type.base.base.ref();
+ fndef_scope.fn_val = self;
+ fndef_scope.base.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *Fn, comp: *Compilation) void {
+ // remove with a tombstone so that we do not have to grab a lock
+ if (self.link_set_node.data != null) {
+ // it's now the job of the link step to find this tombstone and
+ // deallocate it.
+ self.link_set_node.data = null;
+ } else {
+ comp.gpa().destroy(self.link_set_node);
+ }
+
+ self.containing_object.deinit();
+ self.fndef_scope.base.deref(comp);
+ self.symbol_name.deinit();
+ comp.gpa().destroy(self);
+ }
+
+ /// We know that the function definition will end up in an .o file somewhere.
+ /// Here, all we have to do is generate a global prototype.
+ /// TODO cache the prototype per ObjectFile
+ pub fn getLlvmConst(self: *Fn, ofile: *ObjectFile) !?llvm.ValueRef {
+ const llvm_fn_type = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ const llvm_fn = llvm.AddFunction(
+ ofile.module,
+ self.symbol_name.ptr(),
+ llvm_fn_type,
+ ) orelse return error.OutOfMemory;
+
+ // TODO port more logic from codegen.cpp:fn_llvm_value
+
+ return llvm_fn;
+ }
+ };
+
+ pub const Void = struct {
+ base: Value,
+
+ pub fn get(comp: *Compilation) *Void {
+ comp.void_value.base.ref();
+ return comp.void_value;
+ }
+
+ pub fn destroy(self: *Void, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Bool = struct {
+ base: Value,
+ x: bool,
+
+ pub fn get(comp: *Compilation, x: bool) *Bool {
+ if (x) {
+ comp.true_value.base.ref();
+ return comp.true_value;
+ } else {
+ comp.false_value.base.ref();
+ return comp.false_value;
+ }
+ }
+
+ pub fn destroy(self: *Bool, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *Bool, ofile: *ObjectFile) ?llvm.ValueRef {
+ const llvm_type = llvm.Int1TypeInContext(ofile.context);
+ if (self.x) {
+ return llvm.ConstAllOnes(llvm_type);
+ } else {
+ return llvm.ConstNull(llvm_type);
+ }
+ }
+ };
+
+ pub const NoReturn = struct {
+ base: Value,
+
+ pub fn get(comp: *Compilation) *NoReturn {
+ comp.noreturn_value.base.ref();
+ return comp.noreturn_value;
+ }
+
+ pub fn destroy(self: *NoReturn, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Ptr = struct {
+ base: Value,
+ special: Special,
+ mut: Mut,
+
+ pub const Mut = enum {
+ CompTimeConst,
+ CompTimeVar,
+ RunTime,
+ };
+
+ pub const Special = union(enum) {
+ Scalar: *Value,
+ BaseArray: BaseArray,
+ BaseStruct: BaseStruct,
+ HardCodedAddr: u64,
+ Discard,
+ };
+
+ pub const BaseArray = struct {
+ val: *Value,
+ elem_index: usize,
+ };
+
+ pub const BaseStruct = struct {
+ val: *Value,
+ field_index: usize,
+ };
+
+ pub async fn createArrayElemPtr(
+ comp: *Compilation,
+ array_val: *Array,
+ mut: Type.Pointer.Mut,
+ size: Type.Pointer.Size,
+ elem_index: usize,
+ ) !*Ptr {
+ array_val.base.ref();
+ errdefer array_val.base.deref(comp);
+
+ const elem_type = array_val.base.typ.cast(Type.Array).?.key.elem_type;
+ const ptr_type = try await (async Type.Pointer.get(comp, Type.Pointer.Key{
+ .child_type = elem_type,
+ .mut = mut,
+ .vol = Type.Pointer.Vol.Non,
+ .size = size,
+ .alignment = Type.Pointer.Align.Abi,
+ }) catch unreachable);
+ var ptr_type_consumed = false;
+ errdefer if (!ptr_type_consumed) ptr_type.base.base.deref(comp);
+
+ const self = try comp.gpa().create(Value.Ptr{
+ .base = Value{
+ .id = Value.Id.Ptr,
+ .typ = &ptr_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .special = Special{
+ .BaseArray = BaseArray{
+ .val = &array_val.base,
+ .elem_index = 0,
+ },
+ },
+ .mut = Mut.CompTimeConst,
+ });
+ ptr_type_consumed = true;
+ errdefer comp.gpa().destroy(self);
+
+ return self;
+ }
+
+ pub fn destroy(self: *Ptr, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *Ptr, ofile: *ObjectFile) !?llvm.ValueRef {
+ const llvm_type = self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ // TODO carefully port the logic from codegen.cpp:gen_const_val_ptr
+ switch (self.special) {
+ Special.Scalar => |scalar| @panic("TODO"),
+ Special.BaseArray => |base_array| {
+ // TODO put this in one .o file only, and after that, generate extern references to it
+ const array_llvm_value = (try base_array.val.getLlvmConst(ofile)).?;
+ const ptr_bit_count = ofile.comp.target_ptr_bits;
+ const usize_llvm_type = llvm.IntTypeInContext(ofile.context, ptr_bit_count) orelse return error.OutOfMemory;
+ const indices = []llvm.ValueRef{
+ llvm.ConstNull(usize_llvm_type) orelse return error.OutOfMemory,
+ llvm.ConstInt(usize_llvm_type, base_array.elem_index, 0) orelse return error.OutOfMemory,
+ };
+ return llvm.ConstInBoundsGEP(
+ array_llvm_value,
+ &indices,
+ @intCast(c_uint, indices.len),
+ ) orelse return error.OutOfMemory;
+ },
+ Special.BaseStruct => |base_struct| @panic("TODO"),
+ Special.HardCodedAddr => |addr| @panic("TODO"),
+ Special.Discard => unreachable,
+ }
+ }
+ };
+
+ pub const Array = struct {
+ base: Value,
+ special: Special,
+
+ pub const Special = union(enum) {
+ Undefined,
+ OwnedBuffer: []u8,
+ Explicit: Data,
+ };
+
+ pub const Data = struct {
+ parent: Parent,
+ elements: []*Value,
+ };
+
+ /// Takes ownership of buffer
+ pub async fn createOwnedBuffer(comp: *Compilation, buffer: []u8) !*Array {
+ const u8_type = Type.Int.get_u8(comp);
+ defer u8_type.base.base.deref(comp);
+
+ const array_type = try await (async Type.Array.get(comp, Type.Array.Key{
+ .elem_type = &u8_type.base,
+ .len = buffer.len,
+ }) catch unreachable);
+ errdefer array_type.base.base.deref(comp);
+
+ const self = try comp.gpa().create(Value.Array{
+ .base = Value{
+ .id = Value.Id.Array,
+ .typ = &array_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .special = Special{ .OwnedBuffer = buffer },
+ });
+ errdefer comp.gpa().destroy(self);
+
+ return self;
+ }
+
+ pub fn destroy(self: *Array, comp: *Compilation) void {
+ switch (self.special) {
+ Special.Undefined => {},
+ Special.OwnedBuffer => |buf| {
+ comp.gpa().free(buf);
+ },
+ Special.Explicit => {},
+ }
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *Array, ofile: *ObjectFile) !?llvm.ValueRef {
+ switch (self.special) {
+ Special.Undefined => {
+ const llvm_type = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ return llvm.GetUndef(llvm_type);
+ },
+ Special.OwnedBuffer => |buf| {
+ const dont_null_terminate = 1;
+ const llvm_str_init = llvm.ConstStringInContext(
+ ofile.context,
+ buf.ptr,
+ @intCast(c_uint, buf.len),
+ dont_null_terminate,
+ ) orelse return error.OutOfMemory;
+ const str_init_type = llvm.TypeOf(llvm_str_init);
+ const global = llvm.AddGlobal(ofile.module, str_init_type, c"") orelse return error.OutOfMemory;
+ llvm.SetInitializer(global, llvm_str_init);
+ llvm.SetLinkage(global, llvm.PrivateLinkage);
+ llvm.SetGlobalConstant(global, 1);
+ llvm.SetUnnamedAddr(global, 1);
+ llvm.SetAlignment(global, llvm.ABIAlignmentOfType(ofile.comp.target_data_ref, str_init_type));
+ return global;
+ },
+ Special.Explicit => @panic("TODO"),
+ }
+
+ //{
+ // uint64_t len = type_entry->data.array.len;
+ // if (const_val->data.x_array.special == ConstArraySpecialUndef) {
+ // return LLVMGetUndef(type_entry->type_ref);
+ // }
+
+ // LLVMValueRef *values = allocate<LLVMValueRef>(len);
+ // LLVMTypeRef element_type_ref = type_entry->data.array.child_type->type_ref;
+ // bool make_unnamed_struct = false;
+ // for (uint64_t i = 0; i < len; i += 1) {
+ // ConstExprValue *elem_value = &const_val->data.x_array.s_none.elements[i];
+ // LLVMValueRef val = gen_const_val(g, elem_value, "");
+ // values[i] = val;
+ // make_unnamed_struct = make_unnamed_struct || is_llvm_value_unnamed_type(elem_value->type, val);
+ // }
+ // if (make_unnamed_struct) {
+ // return LLVMConstStruct(values, len, true);
+ // } else {
+ // return LLVMConstArray(element_type_ref, values, (unsigned)len);
+ // }
+ //}
+ }
+ };
+
+ pub const Int = struct {
+ base: Value,
+ big_int: std.math.big.Int,
+
+ pub fn createFromString(comp: *Compilation, typ: *Type, base: u8, value: []const u8) !*Int {
+ const self = try comp.gpa().create(Value.Int{
+ .base = Value{
+ .id = Value.Id.Int,
+ .typ = typ,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .big_int = undefined,
+ });
+ typ.base.ref();
+ errdefer comp.gpa().destroy(self);
+
+ self.big_int = try std.math.big.Int.init(comp.gpa());
+ errdefer self.big_int.deinit();
+
+ try self.big_int.setString(base, value);
+
+ return self;
+ }
+
+ pub fn getLlvmConst(self: *Int, ofile: *ObjectFile) !?llvm.ValueRef {
+ switch (self.base.typ.id) {
+ Type.Id.Int => {
+ const type_ref = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ if (self.big_int.len == 0) {
+ return llvm.ConstNull(type_ref);
+ }
+ const unsigned_val = if (self.big_int.len == 1) blk: {
+ break :blk llvm.ConstInt(type_ref, self.big_int.limbs[0], @boolToInt(false));
+ } else if (@sizeOf(std.math.big.Limb) == @sizeOf(u64)) blk: {
+ break :blk llvm.ConstIntOfArbitraryPrecision(
+ type_ref,
+ @intCast(c_uint, self.big_int.len),
+ @ptrCast([*]u64, self.big_int.limbs.ptr),
+ );
+ } else {
+ @compileError("std.math.Big.Int.Limb size does not match LLVM");
+ };
+ return if (self.big_int.positive) unsigned_val else llvm.ConstNeg(unsigned_val);
+ },
+ Type.Id.ComptimeInt => unreachable,
+ else => unreachable,
+ }
+ }
+
+ pub fn copy(old: *Int, comp: *Compilation) !*Int {
+ old.base.typ.base.ref();
+ errdefer old.base.typ.base.deref(comp);
+
+ const new = try comp.gpa().create(Value.Int{
+ .base = Value{
+ .id = Value.Id.Int,
+ .typ = old.base.typ,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .big_int = undefined,
+ });
+ errdefer comp.gpa().destroy(new);
+
+ new.big_int = try old.big_int.clone();
+ errdefer new.big_int.deinit();
+
+ return new;
+ }
+
+ pub fn destroy(self: *Int, comp: *Compilation) void {
+ self.big_int.deinit();
+ comp.gpa().destroy(self);
+ }
+ };
+};
diff --git a/src-self-hosted/visib.zig b/src-self-hosted/visib.zig
new file mode 100644
index 0000000000..3704600cca
--- /dev/null
+++ b/src-self-hosted/visib.zig
@@ -0,0 +1,4 @@
+pub const Visib = enum {
+ Private,
+ Pub,
+};