Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig    |  44
-rw-r--r--  src/codegen/llvm.zig | 761
-rw-r--r--  src/codegen/wasm.zig |  19
3 files changed, 436 insertions, 388 deletions
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index af8d2d272d..6e68a43607 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -14,6 +14,7 @@ const TypedValue = @import("../TypedValue.zig");
const C = link.File.C;
const Decl = Module.Decl;
const trace = @import("../tracy.zig").trace;
+const LazySrcLoc = Module.LazySrcLoc;
const Mutability = enum { Const, Mut };
@@ -145,11 +146,10 @@ pub const DeclGen = struct {
error_msg: ?*Module.ErrorMsg,
typedefs: TypedefMap,
- fn fail(dg: *DeclGen, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
- dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, .{
- .file_scope = dg.decl.getFileScope(),
- .byte_offset = src,
- }, format, args);
+ fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
+ @setCold(true);
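+    // Resolve the lazy source location against this Decl only on this cold error path.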
+ const src_loc = src.toSrcLocWithDecl(dg.decl);
+ dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args);
return error.AnalysisFail;
}
@@ -160,7 +160,7 @@ pub const DeclGen = struct {
val: Value,
) error{ OutOfMemory, AnalysisFail }!void {
if (val.isUndef()) {
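+        // node_offset 0 refers to the Decl's own AST node; codegen has no finer-grained location here.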
- return dg.fail(dg.decl.src(), "TODO: C backend: properly handle undefined in all cases (with debug safety?)", .{});
+ return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: properly handle undefined in all cases (with debug safety?)", .{});
}
switch (t.zigTypeTag()) {
.Int => {
@@ -193,7 +193,7 @@ pub const DeclGen = struct {
try writer.print("{s}", .{decl.name});
},
else => |e| return dg.fail(
- dg.decl.src(),
+ .{ .node_offset = 0 },
"TODO: C backend: implement Pointer value {s}",
.{@tagName(e)},
),
@@ -276,7 +276,7 @@ pub const DeclGen = struct {
try writer.writeAll(", .error = 0 }");
}
},
- else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement value {s}", .{
+ else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{
@tagName(e),
}),
}
@@ -350,7 +350,7 @@ pub const DeclGen = struct {
break;
}
} else {
- return dg.fail(dg.decl.src(), "TODO: C backend: implement integer types larger than 128 bits", .{});
+ return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{});
}
},
else => unreachable,
@@ -358,7 +358,7 @@ pub const DeclGen = struct {
},
.Pointer => {
if (t.isSlice()) {
- return dg.fail(dg.decl.src(), "TODO: C backend: implement slices", .{});
+ return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement slices", .{});
} else {
try dg.renderType(w, t.elemType());
try w.writeAll(" *");
@@ -431,7 +431,7 @@ pub const DeclGen = struct {
dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
},
.Null, .Undefined => unreachable, // must be const or comptime
- else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement type {s}", .{
+ else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type {s}", .{
@tagName(e),
}),
}
@@ -569,13 +569,15 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?),
.is_err => try genIsErr(o, inst.castTag(.is_err).?),
.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?),
+ .error_to_int => try genErrorToInt(o, inst.castTag(.error_to_int).?),
+ .int_to_error => try genIntToError(o, inst.castTag(.int_to_error).?),
.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?),
.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?),
.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?),
.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?),
.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?),
.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?),
- else => |e| return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement codegen for {}", .{e}),
+ else => |e| return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for {}", .{e}),
};
switch (result_value) {
.none => {},
@@ -756,11 +758,11 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue {
try writer.writeAll(");\n");
return result_local;
} else {
- return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement function pointers", .{});
+ return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{});
}
}
-fn genDbgStmt(o: *Object, inst: *Inst.NoOp) !CValue {
+fn genDbgStmt(o: *Object, inst: *Inst.DbgStmt) !CValue {
// TODO emit #line directive here with line number and filename
return CValue.none;
}
@@ -913,13 +915,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
try o.writeCValue(writer, arg_c_value);
try writer.writeAll(";\n");
} else {
- return o.dg.fail(o.dg.decl.src(), "TODO non-explicit inline asm regs", .{});
+ return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{});
}
}
const volatile_string: []const u8 = if (as.is_volatile) "volatile " else "";
try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source });
if (as.output) |_| {
- return o.dg.fail(o.dg.decl.src(), "TODO inline asm output", .{});
+ return o.dg.fail(.{ .node_offset = 0 }, "TODO inline asm output", .{});
}
if (as.inputs.len > 0) {
if (as.output == null) {
@@ -945,7 +947,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
if (as.base.isUnused())
return CValue.none;
- return o.dg.fail(o.dg.decl.src(), "TODO: C backend: inline asm expression result used", .{});
+ return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{});
}
fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue {
@@ -1072,6 +1074,14 @@ fn genIsErr(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
+fn genIntToError(o: *Object, inst: *Inst.UnOp) !CValue {
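+    // The C backend represents error values as plain integers, so this conversion is a no-op; genErrorToInt below is the same.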
+ return o.resolveInst(inst.operand);
+}
+
+fn genErrorToInt(o: *Object, inst: *Inst.UnOp) !CValue {
+ return o.resolveInst(inst.operand);
+}
+
fn IndentWriter(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 7233dbdd07..cd601debda 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -15,6 +15,8 @@ const Inst = ir.Inst;
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
+const LazySrcLoc = Module.LazySrcLoc;
+
pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
@@ -143,79 +145,42 @@ pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
return std.fmt.allocPrintZ(allocator, "{s}-unknown-{s}-{s}", .{ llvm_arch, llvm_os, llvm_abi });
}
-pub const LLVMIRModule = struct {
- module: *Module,
+pub const Object = struct {
llvm_module: *const llvm.Module,
context: *const llvm.Context,
target_machine: *const llvm.TargetMachine,
- builder: *const llvm.Builder,
-
- object_path: []const u8,
-
- gpa: *Allocator,
- err_msg: ?*Module.ErrorMsg = null,
-
- // TODO: The fields below should really move into a different struct,
- // because they are only valid when generating a function
-
- /// This stores the LLVM values used in a function, such that they can be
- /// referred to in other instructions. This table is cleared before every function is generated.
- /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
- /// in here, however if a block ends, the instructions can be thrown away.
- func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value) = .{},
-
-    /// These fields are used to refer to the LLVM value of the function parameters in an Arg instruction.
- args: []*const llvm.Value = &[_]*const llvm.Value{},
- arg_index: usize = 0,
-
- entry_block: *const llvm.BasicBlock = undefined,
-    /// This field stores the last alloca instruction, such that we can append more alloca instructions
- /// to the top of the function.
- latest_alloca_inst: ?*const llvm.Value = null,
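+    /// Null-terminated so it can be passed straight to LLVM's emitToFile during flush.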
+ object_pathZ: [:0]const u8,
- llvm_func: *const llvm.Value = undefined,
-
- /// This data structure is used to implement breaking to blocks.
- blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
- parent_bb: *const llvm.BasicBlock,
- break_bbs: *BreakBasicBlocks,
- break_vals: *BreakValues,
- }) = .{},
-
- src_loc: Module.SrcLoc,
-
- const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
- const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);
-
- pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*LLVMIRModule {
- const self = try allocator.create(LLVMIRModule);
+ pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
+ const self = try allocator.create(Object);
errdefer allocator.destroy(self);
- const gpa = options.module.?.gpa;
-
- const obj_basename = try std.zig.binNameAlloc(gpa, .{
+ const obj_basename = try std.zig.binNameAlloc(allocator, .{
.root_name = options.root_name,
.target = options.target,
.output_mode = .Obj,
});
- defer gpa.free(obj_basename);
+ defer allocator.free(obj_basename);
const o_directory = options.module.?.zig_cache_artifact_directory;
- const object_path = try o_directory.join(gpa, &[_][]const u8{obj_basename});
- errdefer gpa.free(object_path);
+ const object_path = try o_directory.join(allocator, &[_][]const u8{obj_basename});
+ defer allocator.free(object_path);
+
+ const object_pathZ = try allocator.dupeZ(u8, object_path);
+ errdefer allocator.free(object_pathZ);
const context = llvm.Context.create();
errdefer context.dispose();
initializeLLVMTargets();
- const root_nameZ = try gpa.dupeZ(u8, options.root_name);
- defer gpa.free(root_nameZ);
+ const root_nameZ = try allocator.dupeZ(u8, options.root_name);
+ defer allocator.free(root_nameZ);
const llvm_module = llvm.Module.createWithName(root_nameZ.ptr, context);
errdefer llvm_module.dispose();
- const llvm_target_triple = try targetTriple(gpa, options.target);
- defer gpa.free(llvm_target_triple);
+ const llvm_target_triple = try targetTriple(allocator, options.target);
+ defer allocator.free(llvm_target_triple);
var error_message: [*:0]const u8 = undefined;
var target: *const llvm.Target = undefined;
@@ -250,34 +215,21 @@ pub const LLVMIRModule = struct {
);
errdefer target_machine.dispose();
- const builder = context.createBuilder();
- errdefer builder.dispose();
-
self.* = .{
- .module = options.module.?,
.llvm_module = llvm_module,
.context = context,
.target_machine = target_machine,
- .builder = builder,
- .object_path = object_path,
- .gpa = gpa,
- // TODO move this field into a struct that is only instantiated per gen() call
- .src_loc = undefined,
+ .object_pathZ = object_pathZ,
};
return self;
}
- pub fn deinit(self: *LLVMIRModule, allocator: *Allocator) void {
- self.builder.dispose();
+ pub fn deinit(self: *Object, allocator: *Allocator) void {
self.target_machine.dispose();
self.llvm_module.dispose();
self.context.dispose();
- self.func_inst_table.deinit(self.gpa);
- self.gpa.free(self.object_path);
-
- self.blocks.deinit(self.gpa);
-
+ allocator.free(self.object_pathZ);
allocator.destroy(self);
}
@@ -289,7 +241,7 @@ pub const LLVMIRModule = struct {
llvm.initializeAllAsmParsers();
}
- pub fn flushModule(self: *LLVMIRModule, comp: *Compilation) !void {
+ pub fn flushModule(self: *Object, comp: *Compilation) !void {
if (comp.verbose_llvm_ir) {
const dump = self.llvm_module.printToString();
defer llvm.disposeMessage(dump);
@@ -310,13 +262,10 @@ pub const LLVMIRModule = struct {
}
}
- const object_pathZ = try self.gpa.dupeZ(u8, self.object_path);
- defer self.gpa.free(object_pathZ);
-
var error_message: [*:0]const u8 = undefined;
if (self.target_machine.emitToFile(
self.llvm_module,
- object_pathZ.ptr,
+ self.object_pathZ.ptr,
.ObjectFile,
&error_message,
).toBool()) {
@@ -328,44 +277,68 @@ pub const LLVMIRModule = struct {
}
}
- pub fn updateDecl(self: *LLVMIRModule, module: *Module, decl: *Module.Decl) !void {
- self.gen(module, decl) catch |err| switch (err) {
+ pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void {
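+        // Per-Decl state now lives in a short-lived DeclGen instead of on the long-lived Object.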
+ var dg: DeclGen = .{
+ .object = self,
+ .module = module,
+ .decl = decl,
+ .err_msg = null,
+ .gpa = module.gpa,
+ };
+ dg.genDecl() catch |err| switch (err) {
error.CodegenFail => {
decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, decl, self.err_msg.?);
- self.err_msg = null;
+ try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
+ dg.err_msg = null;
return;
},
else => |e| return e,
};
}
+};
- fn gen(self: *LLVMIRModule, module: *Module, decl: *Module.Decl) !void {
- const typed_value = decl.typed_value.most_recent.typed_value;
- const src = decl.src();
+pub const DeclGen = struct {
+ object: *Object,
+ module: *Module,
+ decl: *Module.Decl,
+ err_msg: ?*Module.ErrorMsg,
- self.src_loc = decl.srcLoc();
+ gpa: *Allocator,
+
+ fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+ @setCold(true);
+ assert(self.err_msg == null);
+ const src_loc = @as(LazySrcLoc, .{ .node_offset = 0 }).toSrcLocWithDecl(self.decl);
+ self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args);
+ return error.CodegenFail;
+ }
+
+ fn llvmModule(self: *DeclGen) *const llvm.Module {
+ return self.object.llvm_module;
+ }
+
+ fn context(self: *DeclGen) *const llvm.Context {
+ return self.object.context;
+ }
+
+ fn genDecl(self: *DeclGen) !void {
+ const decl = self.decl;
+ const typed_value = decl.typed_value.most_recent.typed_value;
log.debug("gen: {s} type: {}, value: {}", .{ decl.name, typed_value.ty, typed_value.val });
if (typed_value.val.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const llvm_func = try self.resolveLLVMFunction(func.owner_decl, src);
+ const llvm_func = try self.resolveLLVMFunction(func.owner_decl);
// This gets the LLVM values from the function and stores them in `self.args`.
const fn_param_len = func.owner_decl.typed_value.most_recent.typed_value.ty.fnParamLen();
var args = try self.gpa.alloc(*const llvm.Value, fn_param_len);
- defer self.gpa.free(args);
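+            // Ownership of args moves to the FuncGen below, which frees it in deinit().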
for (args) |*arg, i| {
arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
}
- self.args = args;
- self.arg_index = 0;
-
- // Make sure no other LLVM values from other functions can be referenced
- self.func_inst_table.clearRetainingCapacity();
// We remove all the basic blocks of a function to support incremental
// compilation!
@@ -374,20 +347,293 @@ pub const LLVMIRModule = struct {
bb.deleteBasicBlock();
}
- self.entry_block = self.context.appendBasicBlock(llvm_func, "Entry");
- self.builder.positionBuilderAtEnd(self.entry_block);
- self.latest_alloca_inst = null;
- self.llvm_func = llvm_func;
+ const builder = self.context().createBuilder();
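+    // The FuncGen takes ownership of this builder and disposes of it in fg.deinit().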
+
+ const entry_block = self.context().appendBasicBlock(llvm_func, "Entry");
+ builder.positionBuilderAtEnd(entry_block);
+
+ var fg: FuncGen = .{
+ .dg = self,
+ .builder = builder,
+ .args = args,
+ .arg_index = 0,
+ .func_inst_table = .{},
+ .entry_block = entry_block,
+ .latest_alloca_inst = null,
+ .llvm_func = llvm_func,
+ .blocks = .{},
+ };
+ defer fg.deinit();
- try self.genBody(func.body);
+ try fg.genBody(func.body);
} else if (typed_value.val.castTag(.extern_fn)) |extern_fn| {
- _ = try self.resolveLLVMFunction(extern_fn.data, src);
+ _ = try self.resolveLLVMFunction(extern_fn.data);
} else {
- _ = try self.resolveGlobalDecl(decl, src);
+ _ = try self.resolveGlobalDecl(decl);
+ }
+ }
+
+ /// If the llvm function does not exist, create it
+ fn resolveLLVMFunction(self: *DeclGen, func: *Module.Decl) !*const llvm.Value {
+    // TODO: do we want to store this in our own data structure?
+ if (self.llvmModule().getNamedFunction(func.name)) |llvm_fn| return llvm_fn;
+
+ const zig_fn_type = func.typed_value.most_recent.typed_value.ty;
+ const return_type = zig_fn_type.fnReturnType();
+
+ const fn_param_len = zig_fn_type.fnParamLen();
+
+ const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
+ defer self.gpa.free(fn_param_types);
+ zig_fn_type.fnParamTypes(fn_param_types);
+
+ const llvm_param = try self.gpa.alloc(*const llvm.Type, fn_param_len);
+ defer self.gpa.free(llvm_param);
+
+ for (fn_param_types) |fn_param, i| {
+ llvm_param[i] = try self.getLLVMType(fn_param);
+ }
+
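+    // A zero-length slice has no guaranteed-valid .ptr, so pass null when there are no parameters.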
+ const fn_type = llvm.Type.functionType(
+ try self.getLLVMType(return_type),
+ if (fn_param_len == 0) null else llvm_param.ptr,
+ @intCast(c_uint, fn_param_len),
+ .False,
+ );
+ const llvm_fn = self.llvmModule().addFunction(func.name, fn_type);
+
+ if (return_type.tag() == .noreturn) {
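+        // Telling LLVM the function never returns lets it treat code after calls to it as unreachable.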
+ self.addFnAttr(llvm_fn, "noreturn");
+ }
+
+ return llvm_fn;
+ }
+
+ fn resolveGlobalDecl(self: *DeclGen, decl: *Module.Decl) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
+        // TODO: do we want to store this in our own data structure?
+ if (self.llvmModule().getNamedGlobal(decl.name)) |val| return val;
+
+ const typed_value = decl.typed_value.most_recent.typed_value;
+
+ // TODO: remove this redundant `getLLVMType`, it is also called in `genTypedValue`.
+ const llvm_type = try self.getLLVMType(typed_value.ty);
+ const val = try self.genTypedValue(typed_value, null);
+ const global = self.llvmModule().addGlobal(llvm_type, decl.name);
+ llvm.setInitializer(global, val);
+
+ // TODO ask the Decl if it is const
+ // https://github.com/ziglang/zig/issues/7582
+
+ return global;
+ }
+
+ fn getLLVMType(self: *DeclGen, t: Type) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
+ switch (t.zigTypeTag()) {
+ .Void => return self.context().voidType(),
+ .NoReturn => return self.context().voidType(),
+ .Int => {
+ const info = t.intInfo(self.module.getTarget());
+ return self.context().intType(info.bits);
+ },
+ .Bool => return self.context().intType(1),
+ .Pointer => {
+ if (t.isSlice()) {
+ return self.todo("implement slices", .{});
+ } else {
+ const elem_type = try self.getLLVMType(t.elemType());
+ return elem_type.pointerType(0);
+ }
+ },
+ .Array => {
+ const elem_type = try self.getLLVMType(t.elemType());
+ return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
+ },
+ .Optional => {
+ if (!t.isPtrLikeOptional()) {
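+                // Non-pointer-like optionals are lowered as a { payload, i1 } pair.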
+ var buf: Type.Payload.ElemType = undefined;
+ const child_type = t.optionalChild(&buf);
+
+ var optional_types: [2]*const llvm.Type = .{
+ try self.getLLVMType(child_type),
+ self.context().intType(1),
+ };
+ return self.context().structType(&optional_types, 2, .False);
+ } else {
+ return self.todo("implement optional pointers as actual pointers", .{});
+ }
+ },
+ else => return self.todo("implement getLLVMType for type '{}'", .{t}),
+ }
+ }
+
+ // TODO: figure out a way to remove the FuncGen argument
+ fn genTypedValue(self: *DeclGen, tv: TypedValue, fg: ?*FuncGen) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
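+        // The FuncGen is only needed when a "constant" must be materialized with instructions: a GEP for .decl_ref, an alloca + store for .ref_val.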
+ const llvm_type = try self.getLLVMType(tv.ty);
+
+ if (tv.val.isUndef())
+ return llvm_type.getUndef();
+
+ switch (tv.ty.zigTypeTag()) {
+ .Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
+ .Int => {
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = tv.val.toBigInt(&bigint_space);
+
+ if (bigint.eqZero()) return llvm_type.constNull();
+
+ if (bigint.limbs.len != 1) {
+ return self.todo("implement bigger bigint", .{});
+ }
+ const llvm_int = llvm_type.constInt(bigint.limbs[0], .False);
+ if (!bigint.positive) {
+ return llvm.constNeg(llvm_int);
+ }
+ return llvm_int;
+ },
+ .Pointer => switch (tv.val.tag()) {
+ .decl_ref => {
+ const decl = tv.val.castTag(.decl_ref).?.data;
+ const val = try self.resolveGlobalDecl(decl);
+
+ const usize_type = try self.getLLVMType(Type.initTag(.usize));
+
+ // TODO: second index should be the index into the memory!
+ var indices: [2]*const llvm.Value = .{
+ usize_type.constNull(),
+ usize_type.constNull(),
+ };
+
+ // TODO: consider using buildInBoundsGEP2 for opaque pointers
+ return fg.?.builder.buildInBoundsGEP(val, &indices, 2, "");
+ },
+ .ref_val => {
+ const elem_value = tv.val.castTag(.ref_val).?.data;
+ const elem_type = tv.ty.castPointer().?.data;
+ const alloca = fg.?.buildAlloca(try self.getLLVMType(elem_type));
+ _ = fg.?.builder.buildStore(try self.genTypedValue(.{ .ty = elem_type, .val = elem_value }, fg), alloca);
+ return alloca;
+ },
+ else => return self.todo("implement const of pointer type '{}'", .{tv.ty}),
+ },
+ .Array => {
+ if (tv.val.castTag(.bytes)) |payload| {
+ const zero_sentinel = if (tv.ty.sentinel()) |sentinel| blk: {
+ if (sentinel.tag() == .zero) break :blk true;
+ return self.todo("handle other sentinel values", .{});
+ } else false;
+
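+                // constString's final argument is "don't null-terminate", hence the negated sentinel flag.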
+ return self.context().constString(payload.data.ptr, @intCast(c_uint, payload.data.len), llvm.Bool.fromBool(!zero_sentinel));
+ } else {
+ return self.todo("handle more array values", .{});
+ }
+ },
+ .Optional => {
+ if (!tv.ty.isPtrLikeOptional()) {
+ var buf: Type.Payload.ElemType = undefined;
+ const child_type = tv.ty.optionalChild(&buf);
+ const llvm_child_type = try self.getLLVMType(child_type);
+
+ if (tv.val.tag() == .null_value) {
+ var optional_values: [2]*const llvm.Value = .{
+ llvm_child_type.constNull(),
+ self.context().intType(1).constNull(),
+ };
+ return self.context().constStruct(&optional_values, 2, .False);
+ } else {
+ var optional_values: [2]*const llvm.Value = .{
+ try self.genTypedValue(.{ .ty = child_type, .val = tv.val }, fg),
+ self.context().intType(1).constAllOnes(),
+ };
+ return self.context().constStruct(&optional_values, 2, .False);
+ }
+ } else {
+ return self.todo("implement const of optional pointer", .{});
+ }
+ },
+ else => return self.todo("implement const of type '{}'", .{tv.ty}),
+ }
+ }
+
+ // Helper functions
+ fn addAttr(self: *DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
+ const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
+ assert(kind_id != 0);
+ const llvm_attr = self.context().createEnumAttribute(kind_id, 0);
+ val.addAttributeAtIndex(index, llvm_attr);
+ }
+
+ fn addFnAttr(self: *DeclGen, val: *const llvm.Value, attr_name: []const u8) void {
+ // TODO: improve this API, `addAttr(-1, attr_name)`
+ self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
+ }
+};
+
+pub const FuncGen = struct {
+ dg: *DeclGen,
+
+ builder: *const llvm.Builder,
+
+ /// This stores the LLVM values used in a function, such that they can be
+ /// referred to in other instructions. This table is cleared before every function is generated.
+ /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
+ /// in here, however if a block ends, the instructions can be thrown away.
+ func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value),
+
+    /// These fields are used to refer to the LLVM value of the function parameters in an Arg instruction.
+ args: []*const llvm.Value,
+ arg_index: usize,
+
+ entry_block: *const llvm.BasicBlock,
+    /// This field stores the last alloca instruction, such that we can append more alloca instructions
+ /// to the top of the function.
+ latest_alloca_inst: ?*const llvm.Value,
+
+ llvm_func: *const llvm.Value,
+
+ /// This data structure is used to implement breaking to blocks.
+ blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
+ parent_bb: *const llvm.BasicBlock,
+ break_bbs: *BreakBasicBlocks,
+ break_vals: *BreakValues,
+ }),
+
+ const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
+ const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);
+
+ fn deinit(self: *FuncGen) void {
+ self.builder.dispose();
+ self.func_inst_table.deinit(self.gpa());
+ self.gpa().free(self.args);
+ self.blocks.deinit(self.gpa());
+ }
+
+ fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+ @setCold(true);
+ return self.dg.todo(format, args);
+ }
+
+ fn llvmModule(self: *FuncGen) *const llvm.Module {
+ return self.dg.object.llvm_module;
+ }
+
+ fn context(self: *FuncGen) *const llvm.Context {
+ return self.dg.object.context;
+ }
+
+ fn gpa(self: *FuncGen) *Allocator {
+ return self.dg.gpa;
+ }
+
+ fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value {
+ if (inst.value()) |val| {
+ return self.dg.genTypedValue(.{ .ty = inst.ty, .val = val }, self);
}
+ if (self.func_inst_table.get(inst)) |value| return value;
+
+ return self.todo("implement global llvm values (or the value is not in the func_inst_table table)", .{});
}
- fn genBody(self: *LLVMIRModule, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
+ fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
for (body.instructions) |inst| {
const opt_value = switch (inst.tag) {
.add => try self.genAdd(inst.castTag(.add).?),
@@ -425,13 +671,13 @@ pub const LLVMIRModule = struct {
// TODO: implement debug info
break :blk null;
},
- else => |tag| return self.fail(inst.src, "TODO implement LLVM codegen for Zir instruction: {}", .{tag}),
+ else => |tag| return self.todo("implement TZIR instruction: {}", .{tag}),
};
- if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val);
+ if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val);
}
}
- fn genCall(self: *LLVMIRModule, inst: *Inst.Call) !?*const llvm.Value {
+ fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value {
if (inst.func.value()) |func_value| {
const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn|
extern_fn.data
@@ -441,12 +687,12 @@ pub const LLVMIRModule = struct {
unreachable;
const zig_fn_type = fn_decl.typed_value.most_recent.typed_value.ty;
- const llvm_fn = try self.resolveLLVMFunction(fn_decl, inst.base.src);
+ const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl);
const num_args = inst.args.len;
- const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, num_args);
- defer self.gpa.free(llvm_param_vals);
+ const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args);
+ defer self.gpa().free(llvm_param_vals);
for (inst.args) |arg, i| {
llvm_param_vals[i] = try self.resolveInst(arg);
@@ -471,27 +717,32 @@ pub const LLVMIRModule = struct {
return call;
} else {
- return self.fail(inst.base.src, "TODO implement calling runtime known function pointer LLVM backend", .{});
+ return self.todo("implement calling runtime known function pointer", .{});
}
}
- fn genRetVoid(self: *LLVMIRModule, inst: *Inst.NoOp) ?*const llvm.Value {
+ fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
_ = self.builder.buildRetVoid();
return null;
}
- fn genRet(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
+ if (!inst.operand.ty.hasCodeGenBits()) {
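+            // Zero-bit values have no LLVM representation, so return void instead.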
+ // TODO: in astgen these instructions should turn into `retvoid` instructions.
+ _ = self.builder.buildRetVoid();
+ return null;
+ }
_ = self.builder.buildRet(try self.resolveInst(inst.operand));
return null;
}
- fn genCmp(self: *LLVMIRModule, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
+ fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
const lhs = try self.resolveInst(inst.lhs);
const rhs = try self.resolveInst(inst.rhs);
if (!inst.base.ty.isInt())
if (inst.base.ty.tag() != .bool)
- return self.fail(inst.base.src, "TODO implement 'genCmp' for type {}", .{inst.base.ty});
+ return self.todo("implement 'genCmp' for type {}", .{inst.base.ty});
const is_signed = inst.base.ty.isSignedInt();
const operation = switch (op) {
@@ -506,21 +757,21 @@ pub const LLVMIRModule = struct {
return self.builder.buildICmp(operation, lhs, rhs, "");
}
- fn genBlock(self: *LLVMIRModule, inst: *Inst.Block) !?*const llvm.Value {
- const parent_bb = self.context.createBasicBlock("Block");
+ fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value {
+ const parent_bb = self.context().createBasicBlock("Block");
// 5 breaks to a block seems like a reasonable default.
- var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5);
- var break_vals = try BreakValues.initCapacity(self.gpa, 5);
- try self.blocks.putNoClobber(self.gpa, inst, .{
+ var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5);
+ var break_vals = try BreakValues.initCapacity(self.gpa(), 5);
+ try self.blocks.putNoClobber(self.gpa(), inst, .{
.parent_bb = parent_bb,
.break_bbs = &break_bbs,
.break_vals = &break_vals,
});
defer {
self.blocks.removeAssertDiscard(inst);
- break_bbs.deinit(self.gpa);
- break_vals.deinit(self.gpa);
+ break_bbs.deinit(self.gpa());
+ break_vals.deinit(self.gpa());
}
try self.genBody(inst.body);
@@ -531,7 +782,7 @@ pub const LLVMIRModule = struct {
// If the block does not return a value, we dont have to create a phi node.
if (!inst.base.ty.hasCodeGenBits()) return null;
- const phi_node = self.builder.buildPhi(try self.getLLVMType(inst.base.ty, inst.base.src), "");
+ const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty), "");
phi_node.addIncoming(
break_vals.items.ptr,
break_bbs.items.ptr,
@@ -540,7 +791,7 @@ pub const LLVMIRModule = struct {
return phi_node;
}
- fn genBr(self: *LLVMIRModule, inst: *Inst.Br) !?*const llvm.Value {
+ fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value {
var block = self.blocks.get(inst.block).?;
// If the break doesn't break a value, then we don't have to add
@@ -553,25 +804,25 @@ pub const LLVMIRModule = struct {
// For the phi node, we need the basic blocks and the values of the
// break instructions.
- try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
- try block.break_vals.append(self.gpa, val);
+ try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock());
+ try block.break_vals.append(self.gpa(), val);
_ = self.builder.buildBr(block.parent_bb);
}
return null;
}
- fn genBrVoid(self: *LLVMIRModule, inst: *Inst.BrVoid) !?*const llvm.Value {
+ fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value {
var block = self.blocks.get(inst.block).?;
_ = self.builder.buildBr(block.parent_bb);
return null;
}
- fn genCondBr(self: *LLVMIRModule, inst: *Inst.CondBr) !?*const llvm.Value {
+ fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value {
const condition_value = try self.resolveInst(inst.condition);
- const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
- const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
+ const then_block = self.context().appendBasicBlock(self.llvm_func, "Then");
+ const else_block = self.context().appendBasicBlock(self.llvm_func, "Else");
{
const prev_block = self.builder.getInsertBlock();
defer self.builder.positionBuilderAtEnd(prev_block);
@@ -586,8 +837,8 @@ pub const LLVMIRModule = struct {
return null;
}
- fn genLoop(self: *LLVMIRModule, inst: *Inst.Loop) !?*const llvm.Value {
- const loop_block = self.context.appendBasicBlock(self.llvm_func, "Loop");
+ fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value {
+ const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop");
_ = self.builder.buildBr(loop_block);
self.builder.positionBuilderAtEnd(loop_block);
@@ -597,20 +848,20 @@ pub const LLVMIRModule = struct {
return null;
}
- fn genNot(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ fn genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
return self.builder.buildNot(try self.resolveInst(inst.operand), "");
}
- fn genUnreach(self: *LLVMIRModule, inst: *Inst.NoOp) ?*const llvm.Value {
+ fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
_ = self.builder.buildUnreachable();
return null;
}
- fn genIsNonNull(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+ fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
const operand = try self.resolveInst(inst.operand);
if (operand_is_ptr) {
- const index_type = self.context.intType(32);
+ const index_type = self.context().intType(32);
var indices: [2]*const llvm.Value = .{
index_type.constNull(),
@@ -623,15 +874,15 @@ pub const LLVMIRModule = struct {
}
}
- fn genIsNull(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+ fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, "");
}
- fn genOptionalPayload(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+ fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
const operand = try self.resolveInst(inst.operand);
if (operand_is_ptr) {
- const index_type = self.context.intType(32);
+ const index_type = self.context().intType(32);
var indices: [2]*const llvm.Value = .{
index_type.constNull(),
@@ -644,12 +895,12 @@ pub const LLVMIRModule = struct {
}
}
- fn genAdd(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+ fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
const lhs = try self.resolveInst(inst.lhs);
const rhs = try self.resolveInst(inst.rhs);
if (!inst.base.ty.isInt())
- return self.fail(inst.base.src, "TODO implement 'genAdd' for type {}", .{inst.base.ty});
+ return self.todo("implement 'genAdd' for type {}", .{inst.base.ty});
return if (inst.base.ty.isSignedInt())
self.builder.buildNSWAdd(lhs, rhs, "")
@@ -657,12 +908,12 @@ pub const LLVMIRModule = struct {
self.builder.buildNUWAdd(lhs, rhs, "");
}
- fn genSub(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+ fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
const lhs = try self.resolveInst(inst.lhs);
const rhs = try self.resolveInst(inst.rhs);
if (!inst.base.ty.isInt())
- return self.fail(inst.base.src, "TODO implement 'genSub' for type {}", .{inst.base.ty});
+ return self.todo("implement 'genSub' for type {}", .{inst.base.ty});
return if (inst.base.ty.isSignedInt())
self.builder.buildNSWSub(lhs, rhs, "")
@@ -670,44 +921,44 @@ pub const LLVMIRModule = struct {
self.builder.buildNUWSub(lhs, rhs, "");
}
- fn genIntCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
const val = try self.resolveInst(inst.operand);
const signed = inst.base.ty.isSignedInt();
// TODO: Should we use intcast here or just a simple bitcast?
// LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
- return self.builder.buildIntCast2(val, try self.getLLVMType(inst.base.ty, inst.base.src), llvm.Bool.fromBool(signed), "");
+ return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty), llvm.Bool.fromBool(signed), "");
}
- fn genBitCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
const val = try self.resolveInst(inst.operand);
- const dest_type = try self.getLLVMType(inst.base.ty, inst.base.src);
+ const dest_type = try self.dg.getLLVMType(inst.base.ty);
return self.builder.buildBitCast(val, dest_type, "");
}
- fn genArg(self: *LLVMIRModule, inst: *Inst.Arg) !?*const llvm.Value {
+ fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value {
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
- const ptr_val = self.buildAlloca(try self.getLLVMType(inst.base.ty, inst.base.src));
+ const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty));
_ = self.builder.buildStore(arg_val, ptr_val);
return self.builder.buildLoad(ptr_val, "");
}
- fn genAlloc(self: *LLVMIRModule, inst: *Inst.NoOp) !?*const llvm.Value {
+ fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
// buildAlloca expects the pointee type, not the pointer type, so assert that
// a Payload.PointerSimple is passed to the alloc instruction.
const pointee_type = inst.base.ty.castPointer().?.data;
// TODO: figure out a way to get the name of the var decl.
// TODO: set alignment and volatile
- return self.buildAlloca(try self.getLLVMType(pointee_type, inst.base.src));
+ return self.buildAlloca(try self.dg.getLLVMType(pointee_type));
}
/// Use this instead of builder.buildAlloca, because this function makes sure to
/// put the alloca instruction at the top of the function!
- fn buildAlloca(self: *LLVMIRModule, t: *const llvm.Type) *const llvm.Value {
+ fn buildAlloca(self: *FuncGen, t: *const llvm.Type) *const llvm.Value {
const prev_block = self.builder.getInsertBlock();
defer self.builder.positionBuilderAtEnd(prev_block);
@@ -729,242 +980,30 @@ pub const LLVMIRModule = struct {
return val;
}
- fn genStore(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+ fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
const val = try self.resolveInst(inst.rhs);
const ptr = try self.resolveInst(inst.lhs);
_ = self.builder.buildStore(val, ptr);
return null;
}
- fn genLoad(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
const ptr_val = try self.resolveInst(inst.operand);
return self.builder.buildLoad(ptr_val, "");
}
- fn genBreakpoint(self: *LLVMIRModule, inst: *Inst.NoOp) !?*const llvm.Value {
+ fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
const llvn_fn = self.getIntrinsic("llvm.debugtrap");
_ = self.builder.buildCall(llvn_fn, null, 0, "");
return null;
}
- fn getIntrinsic(self: *LLVMIRModule, name: []const u8) *const llvm.Value {
+ fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
// TODO: add support for overload intrinsics by passing the prefix of the intrinsic
// to `lookupIntrinsicID` and then passing the correct types to
// `getIntrinsicDeclaration`
- return self.llvm_module.getIntrinsicDeclaration(id, null, 0);
- }
-
- fn resolveInst(self: *LLVMIRModule, inst: *ir.Inst) !*const llvm.Value {
- if (inst.value()) |val| {
- return self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = val });
- }
- if (self.func_inst_table.get(inst)) |value| return value;
-
- return self.fail(inst.src, "TODO implement global llvm values (or the value is not in the func_inst_table table)", .{});
- }
-
- fn genTypedValue(self: *LLVMIRModule, src: usize, tv: TypedValue) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
- const llvm_type = try self.getLLVMType(tv.ty, src);
-
- if (tv.val.isUndef())
- return llvm_type.getUndef();
-
- switch (tv.ty.zigTypeTag()) {
- .Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
- .Int => {
- var bigint_space: Value.BigIntSpace = undefined;
- const bigint = tv.val.toBigInt(&bigint_space);
-
- if (bigint.eqZero()) return llvm_type.constNull();
-
- if (bigint.limbs.len != 1) {
- return self.fail(src, "TODO implement bigger bigint", .{});
- }
- const llvm_int = llvm_type.constInt(bigint.limbs[0], .False);
- if (!bigint.positive) {
- return llvm.constNeg(llvm_int);
- }
- return llvm_int;
- },
- .Pointer => switch (tv.val.tag()) {
- .decl_ref => {
- const decl = tv.val.castTag(.decl_ref).?.data;
- const val = try self.resolveGlobalDecl(decl, src);
-
- const usize_type = try self.getLLVMType(Type.initTag(.usize), src);
-
- // TODO: second index should be the index into the memory!
- var indices: [2]*const llvm.Value = .{
- usize_type.constNull(),
- usize_type.constNull(),
- };
-
- // TODO: consider using buildInBoundsGEP2 for opaque pointers
- return self.builder.buildInBoundsGEP(val, &indices, 2, "");
- },
- .ref_val => {
- const elem_value = tv.val.castTag(.ref_val).?.data;
- const elem_type = tv.ty.castPointer().?.data;
- const alloca = self.buildAlloca(try self.getLLVMType(elem_type, src));
- _ = self.builder.buildStore(try self.genTypedValue(src, .{ .ty = elem_type, .val = elem_value }), alloca);
- return alloca;
- },
- else => return self.fail(src, "TODO implement const of pointer type '{}'", .{tv.ty}),
- },
- .Array => {
- if (tv.val.castTag(.bytes)) |payload| {
- const zero_sentinel = if (tv.ty.sentinel()) |sentinel| blk: {
- if (sentinel.tag() == .zero) break :blk true;
- return self.fail(src, "TODO handle other sentinel values", .{});
- } else false;
-
- return self.context.constString(payload.data.ptr, @intCast(c_uint, payload.data.len), llvm.Bool.fromBool(!zero_sentinel));
- } else {
- return self.fail(src, "TODO handle more array values", .{});
- }
- },
- .Optional => {
- if (!tv.ty.isPtrLikeOptional()) {
- var buf: Type.Payload.ElemType = undefined;
- const child_type = tv.ty.optionalChild(&buf);
- const llvm_child_type = try self.getLLVMType(child_type, src);
-
- if (tv.val.tag() == .null_value) {
- var optional_values: [2]*const llvm.Value = .{
- llvm_child_type.constNull(),
- self.context.intType(1).constNull(),
- };
- return self.context.constStruct(&optional_values, 2, .False);
- } else {
- var optional_values: [2]*const llvm.Value = .{
- try self.genTypedValue(src, .{ .ty = child_type, .val = tv.val }),
- self.context.intType(1).constAllOnes(),
- };
- return self.context.constStruct(&optional_values, 2, .False);
- }
- } else {
- return self.fail(src, "TODO implement const of optional pointer", .{});
- }
- },
- else => return self.fail(src, "TODO implement const of type '{}'", .{tv.ty}),
- }
- }
-
- fn getLLVMType(self: *LLVMIRModule, t: Type, src: usize) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
- switch (t.zigTypeTag()) {
- .Void => return self.context.voidType(),
- .NoReturn => return self.context.voidType(),
- .Int => {
- const info = t.intInfo(self.module.getTarget());
- return self.context.intType(info.bits);
- },
- .Bool => return self.context.intType(1),
- .Pointer => {
- if (t.isSlice()) {
- return self.fail(src, "TODO: LLVM backend: implement slices", .{});
- } else {
- const elem_type = try self.getLLVMType(t.elemType(), src);
- return elem_type.pointerType(0);
- }
- },
- .Array => {
- const elem_type = try self.getLLVMType(t.elemType(), src);
- return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
- },
- .Optional => {
- if (!t.isPtrLikeOptional()) {
- var buf: Type.Payload.ElemType = undefined;
- const child_type = t.optionalChild(&buf);
-
- var optional_types: [2]*const llvm.Type = .{
- try self.getLLVMType(child_type, src),
- self.context.intType(1),
- };
- return self.context.structType(&optional_types, 2, .False);
- } else {
- return self.fail(src, "TODO implement optional pointers as actual pointers", .{});
- }
- },
- else => return self.fail(src, "TODO implement getLLVMType for type '{}'", .{t}),
- }
- }
-
- fn resolveGlobalDecl(self: *LLVMIRModule, decl: *Module.Decl, src: usize) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
-    // TODO: do we want to store this in our own data structure?
- if (self.llvm_module.getNamedGlobal(decl.name)) |val| return val;
-
- const typed_value = decl.typed_value.most_recent.typed_value;
-
- // TODO: remove this redundant `getLLVMType`, it is also called in `genTypedValue`.
- const llvm_type = try self.getLLVMType(typed_value.ty, src);
- const val = try self.genTypedValue(src, typed_value);
- const global = self.llvm_module.addGlobal(llvm_type, decl.name);
- llvm.setInitializer(global, val);
-
- // TODO ask the Decl if it is const
- // https://github.com/ziglang/zig/issues/7582
-
- return global;
- }
-
- /// If the llvm function does not exist, create it
- fn resolveLLVMFunction(self: *LLVMIRModule, func: *Module.Decl, src: usize) !*const llvm.Value {
-    // TODO: do we want to store this in our own data structure?
- if (self.llvm_module.getNamedFunction(func.name)) |llvm_fn| return llvm_fn;
-
- const zig_fn_type = func.typed_value.most_recent.typed_value.ty;
- const return_type = zig_fn_type.fnReturnType();
-
- const fn_param_len = zig_fn_type.fnParamLen();
-
- const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
- defer self.gpa.free(fn_param_types);
- zig_fn_type.fnParamTypes(fn_param_types);
-
- const llvm_param = try self.gpa.alloc(*const llvm.Type, fn_param_len);
- defer self.gpa.free(llvm_param);
-
- for (fn_param_types) |fn_param, i| {
- llvm_param[i] = try self.getLLVMType(fn_param, src);
- }
-
- const fn_type = llvm.Type.functionType(
- try self.getLLVMType(return_type, src),
- if (fn_param_len == 0) null else llvm_param.ptr,
- @intCast(c_uint, fn_param_len),
- .False,
- );
- const llvm_fn = self.llvm_module.addFunction(func.name, fn_type);
-
- if (return_type.tag() == .noreturn) {
- self.addFnAttr(llvm_fn, "noreturn");
- }
-
- return llvm_fn;
- }
-
- // Helper functions
- fn addAttr(self: LLVMIRModule, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
- const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
- assert(kind_id != 0);
- const llvm_attr = self.context.createEnumAttribute(kind_id, 0);
- val.addAttributeAtIndex(index, llvm_attr);
- }
-
- fn addFnAttr(self: *LLVMIRModule, val: *const llvm.Value, attr_name: []const u8) void {
- // TODO: improve this API, `addAttr(-1, attr_name)`
- self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
- }
-
- pub fn fail(self: *LLVMIRModule, src: usize, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
- @setCold(true);
- assert(self.err_msg == null);
- self.err_msg = try Module.ErrorMsg.create(self.gpa, .{
- .file_scope = self.src_loc.file_scope,
- .byte_offset = src,
- }, format, args);
- return error.CodegenFail;
+ return self.llvmModule().getIntrinsicDeclaration(id, null, 0);
}
};
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index d5f68eca81..827f6c366a 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -14,6 +14,7 @@ const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
const Compilation = @import("../Compilation.zig");
const AnyMCValue = @import("../codegen.zig").AnyMCValue;
+const LazySrcLoc = Module.LazySrcLoc;
/// Wasm Value, created when generating an instruction
const WValue = union(enum) {
@@ -70,11 +71,9 @@ pub const Context = struct {
}
/// Sets `err_msg` on `Context` and returns `error.CodegenFail`, which is caught in link/Wasm.zig
- fn fail(self: *Context, src: usize, comptime fmt: []const u8, args: anytype) InnerError {
- self.err_msg = try Module.ErrorMsg.create(self.gpa, .{
- .file_scope = self.decl.getFileScope(),
- .byte_offset = src,
- }, fmt, args);
+ fn fail(self: *Context, src: LazySrcLoc, comptime fmt: []const u8, args: anytype) InnerError {
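+        // Resolve the lazy source location against this Decl only when an error is actually reported.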
+ const src_loc = src.toSrcLocWithDecl(self.decl);
+ self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args);
return error.CodegenFail;
}
@@ -91,7 +90,7 @@ pub const Context = struct {
}
/// Using a given `Type`, returns the corresponding wasm value type
- fn genValtype(self: *Context, src: usize, ty: Type) InnerError!u8 {
+ fn genValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
return switch (ty.tag()) {
.f32 => wasm.valtype(.f32),
.f64 => wasm.valtype(.f64),
@@ -104,7 +103,7 @@ pub const Context = struct {
/// Using a given `Type`, returns the corresponding wasm value type
/// Differently from `genValtype` this also allows `void` to create a block
/// with no return type
- fn genBlockType(self: *Context, src: usize, ty: Type) InnerError!u8 {
+ fn genBlockType(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
return switch (ty.tag()) {
.void, .noreturn => wasm.block_empty,
else => self.genValtype(src, ty),
@@ -139,7 +138,7 @@ pub const Context = struct {
ty.fnParamTypes(params);
for (params) |param_type| {
// Can we maybe get the source index of each param?
- const val_type = try self.genValtype(self.decl.src(), param_type);
+ const val_type = try self.genValtype(.{ .node_offset = 0 }, param_type);
try writer.writeByte(val_type);
}
}
@@ -151,7 +150,7 @@ pub const Context = struct {
else => |ret_type| {
try leb.writeULEB128(writer, @as(u32, 1));
// Can we maybe get the source index of the return type?
- const val_type = try self.genValtype(self.decl.src(), return_type);
+ const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type);
try writer.writeByte(val_type);
},
}
@@ -168,7 +167,7 @@ pub const Context = struct {
const mod_fn = blk: {
if (tv.val.castTag(.function)) |func| break :blk func.data;
if (tv.val.castTag(.extern_fn)) |ext_fn| return; // don't need codegen for extern functions
- return self.fail(self.decl.src(), "TODO: Wasm codegen for decl type '{s}'", .{tv.ty.tag()});
+ return self.fail(.{ .node_offset = 0 }, "TODO: Wasm codegen for decl type '{s}'", .{tv.ty.tag()});
};
// Reserve space to write the size after generating the code as well as space for locals count