author    Takeshi Yoneda <takeshi@tetrate.io>  2021-08-09 14:39:26 +0900
committer Takeshi Yoneda <takeshi@tetrate.io>  2021-08-09 14:39:26 +0900
commit    97560cd915008f04addc2c30af087aa89c162b02 (patch)
tree      8aed12c207ff84cc256a0c78955c23b61129ba22 /src/codegen
parent    7814a2bd4a3ec22cd9548c622f7dc837dba968f7 (diff)
parent    799fedf612aa8742c446b015c12d21707a1dbec0 (diff)
Merge remote-tracking branch 'origin' into libc-wasi-test
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig              171
-rw-r--r--  src/codegen/llvm.zig           760
-rw-r--r--  src/codegen/llvm/bindings.zig  206
-rw-r--r--  src/codegen/wasm.zig           267
4 files changed, 1090 insertions(+), 314 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 705c5c2ad1..a67e2438c2 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -262,6 +262,7 @@ pub const DeclGen = struct {
.one => try writer.writeAll("1"),
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
+ decl.alive = true;
// Determine if we must pointer cast.
assert(decl.has_tv);
@@ -281,36 +282,7 @@ pub const DeclGen = struct {
const decl = val.castTag(.extern_fn).?.data;
try writer.print("{s}", .{decl.name});
},
- else => switch (t.ptrSize()) {
- .Slice => unreachable,
- .Many => {
- if (val.castTag(.ref_val)) |ref_val_payload| {
- const sub_val = ref_val_payload.data;
- if (sub_val.castTag(.bytes)) |bytes_payload| {
- const bytes = bytes_payload.data;
- try writer.writeByte('(');
- try dg.renderType(writer, t);
- // TODO: make our own C string escape instead of using std.zig.fmtEscapes
- try writer.print(")\"{}\"", .{std.zig.fmtEscapes(bytes)});
- } else {
- unreachable;
- }
- } else {
- unreachable;
- }
- },
- .One => {
- var arena = std.heap.ArenaAllocator.init(dg.module.gpa);
- defer arena.deinit();
-
- const elem_ty = t.elemType();
- const elem_val = try val.pointerDeref(&arena.allocator);
-
- try writer.writeAll("&");
- try dg.renderValue(writer, elem_ty, elem_val);
- },
- .C => unreachable,
- },
+ else => unreachable,
},
},
.Array => {
@@ -378,32 +350,25 @@ pub const DeclGen = struct {
.ErrorUnion => {
const error_type = t.errorUnionSet();
const payload_type = t.errorUnionPayload();
- const sub_val = val.castTag(.error_union).?.data;
if (!payload_type.hasCodeGenBits()) {
// We use the error type directly as the type.
- return dg.renderValue(writer, error_type, sub_val);
+ const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val;
+ return dg.renderValue(writer, error_type, err_val);
}
try writer.writeByte('(');
try dg.renderType(writer, t);
try writer.writeAll("){");
- if (val.getError()) |_| {
- try writer.writeAll(" .error = ");
- try dg.renderValue(
- writer,
- error_type,
- sub_val,
- );
- try writer.writeAll(" }");
- } else {
+ if (val.castTag(.eu_payload)) |pl| {
+ const payload_val = pl.data;
try writer.writeAll(" .payload = ");
- try dg.renderValue(
- writer,
- payload_type,
- sub_val,
- );
+ try dg.renderValue(writer, payload_type, payload_val);
try writer.writeAll(", .error = 0 }");
+ } else {
+ try writer.writeAll(" .error = ");
+ try dg.renderValue(writer, error_type, val);
+ try writer.writeAll(" }");
}
},
.Enum => {
@@ -436,6 +401,7 @@ pub const DeclGen = struct {
.one => try writer.writeAll("1"),
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
+ decl.alive = true;
// Determine if we must pointer cast.
assert(decl.has_tv);
@@ -448,11 +414,13 @@ pub const DeclGen = struct {
}
},
.function => {
- const func = val.castTag(.function).?.data;
- try writer.print("{s}", .{func.owner_decl.name});
+ const decl = val.castTag(.function).?.data.owner_decl;
+ decl.alive = true;
+ try writer.print("{s}", .{decl.name});
},
.extern_fn => {
const decl = val.castTag(.extern_fn).?.data;
+ decl.alive = true;
try writer.print("{s}", .{decl.name});
},
else => unreachable,
@@ -875,19 +843,19 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
// TODO use a different strategy for add that communicates to the optimizer
// that wrapping is UB.
- .add => try airBinOp( o, inst, " + "),
- .addwrap => try airWrapOp(o, inst, " + ", "addw_"),
+ .add, .ptr_add => try airBinOp( o, inst, " + "),
+ .addwrap => try airWrapOp(o, inst, " + ", "addw_"),
// TODO use a different strategy for sub that communicates to the optimizer
// that wrapping is UB.
- .sub => try airBinOp( o, inst, " - "),
- .subwrap => try airWrapOp(o, inst, " - ", "subw_"),
+ .sub, .ptr_sub => try airBinOp( o, inst, " - "),
+ .subwrap => try airWrapOp(o, inst, " - ", "subw_"),
// TODO use a different strategy for mul that communicates to the optimizer
// that wrapping is UB.
- .mul => try airBinOp( o, inst, " * "),
- .mulwrap => try airWrapOp(o, inst, " * ", "mulw_"),
+ .mul => try airBinOp( o, inst, " * "),
+ .mulwrap => try airWrapOp(o, inst, " * ", "mulw_"),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
- .div => try airBinOp( o, inst, " / "),
+ .div => try airBinOp( o, inst, " / "),
.cmp_eq => try airBinOp(o, inst, " == "),
.cmp_gt => try airBinOp(o, inst, " > "),
@@ -925,6 +893,8 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
.call => try airCall(o, inst),
.dbg_stmt => try airDbgStmt(o, inst),
.intcast => try airIntCast(o, inst),
+ .trunc => try airTrunc(o, inst),
+ .bool_to_int => try airBoolToInt(o, inst),
.load => try airLoad(o, inst),
.ret => try airRet(o, inst),
.store => try airStore(o, inst),
@@ -933,12 +903,13 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
.br => try airBr(o, inst),
.switch_br => try airSwitchBr(o, inst),
.wrap_optional => try airWrapOptional(o, inst),
- .ref => try airRef(o, inst),
.struct_field_ptr => try airStructFieldPtr(o, inst),
- .varptr => try airVarPtr(o, inst),
+ .struct_field_val => try airStructFieldVal(o, inst),
.slice_ptr => try airSliceField(o, inst, ".ptr;\n"),
.slice_len => try airSliceField(o, inst, ".len;\n"),
+ .ptr_elem_val => try airPtrElemVal(o, inst, "["),
+ .ptr_ptr_elem_val => try airPtrElemVal(o, inst, "[0]["),
.slice_elem_val => try airSliceElemVal(o, inst, "["),
.ptr_slice_elem_val => try airSliceElemVal(o, inst, "[0]["),
@@ -977,8 +948,18 @@ fn airSliceField(o: *Object, inst: Air.Inst.Index, suffix: []const u8) !CValue {
return local;
}
+fn airPtrElemVal(o: *Object, inst: Air.Inst.Index, prefix: []const u8) !CValue {
+ const is_volatile = false; // TODO
+ if (!is_volatile and o.liveness.isUnused(inst))
+ return CValue.none;
+
+ _ = prefix;
+ return o.dg.fail("TODO: C backend: airPtrElemVal", .{});
+}
+
fn airSliceElemVal(o: *Object, inst: Air.Inst.Index, prefix: []const u8) !CValue {
- if (o.liveness.isUnused(inst))
+ const is_volatile = false; // TODO
+ if (!is_volatile and o.liveness.isUnused(inst))
return CValue.none;
const bin_op = o.air.instructions.items(.data)[inst].bin_op;
@@ -994,12 +975,6 @@ fn airSliceElemVal(o: *Object, inst: Air.Inst.Index, prefix: []const u8) !CValue
return local;
}
-fn airVarPtr(o: *Object, inst: Air.Inst.Index) !CValue {
- const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
- const variable = o.air.variables[ty_pl.payload];
- return CValue{ .decl_ref = variable.owner_decl };
-}
-
fn airAlloc(o: *Object, inst: Air.Inst.Index) !CValue {
const writer = o.writer();
const inst_ty = o.air.typeOfIndex(inst);
@@ -1069,7 +1044,7 @@ fn airIntCast(o: *Object, inst: Air.Inst.Index) !CValue {
return CValue.none;
const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const from = try o.resolveInst(ty_op.operand);
+ const operand = try o.resolveInst(ty_op.operand);
const writer = o.writer();
const inst_ty = o.air.typeOfIndex(inst);
@@ -1077,7 +1052,31 @@ fn airIntCast(o: *Object, inst: Air.Inst.Index) !CValue {
try writer.writeAll(" = (");
try o.dg.renderType(writer, inst_ty);
try writer.writeAll(")");
- try o.writeCValue(writer, from);
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+ return local;
+}
+
+fn airTrunc(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+ const operand = try o.resolveInst(ty_op.operand);
+ _ = operand;
+ return o.dg.fail("TODO: C backend: airTrunc", .{});
+}
+
+fn airBoolToInt(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+ const un_op = o.air.instructions.items(.data)[inst].un_op;
+ const writer = o.writer();
+ const inst_ty = o.air.typeOfIndex(inst);
+ const operand = try o.resolveInst(un_op);
+ const local = try o.allocLocal(inst_ty, .Const);
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, operand);
try writer.writeAll(";\n");
return local;
}
@@ -1637,22 +1636,6 @@ fn airOptionalPayload(o: *Object, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airRef(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
- return CValue.none;
-
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const writer = o.writer();
- const operand = try o.resolveInst(ty_op.operand);
-
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
- try writer.writeAll(" = ");
- try o.writeCValue(writer, operand);
- try writer.writeAll(";\n");
- return local;
-}
-
fn airStructFieldPtr(o: *Object, inst: Air.Inst.Index) !CValue {
if (o.liveness.isUnused(inst))
return CValue.none;
@@ -1660,8 +1643,8 @@ fn airStructFieldPtr(o: *Object, inst: Air.Inst.Index) !CValue {
const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
const extra = o.air.extraData(Air.StructField, ty_pl.payload).data;
const writer = o.writer();
- const struct_ptr = try o.resolveInst(extra.struct_ptr);
- const struct_ptr_ty = o.air.typeOf(extra.struct_ptr);
+ const struct_ptr = try o.resolveInst(extra.struct_operand);
+ const struct_ptr_ty = o.air.typeOf(extra.struct_operand);
const struct_obj = struct_ptr_ty.elemType().castTag(.@"struct").?.data;
const field_name = struct_obj.fields.keys()[extra.field_index];
@@ -1680,6 +1663,26 @@ fn airStructFieldPtr(o: *Object, inst: Air.Inst.Index) !CValue {
return local;
}
+fn airStructFieldVal(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
+ const extra = o.air.extraData(Air.StructField, ty_pl.payload).data;
+ const writer = o.writer();
+ const struct_byval = try o.resolveInst(extra.struct_operand);
+ const struct_ty = o.air.typeOf(extra.struct_operand);
+ const struct_obj = struct_ty.castTag(.@"struct").?.data;
+ const field_name = struct_obj.fields.keys()[extra.field_index];
+
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, struct_byval);
+ try writer.print(".{};\n", .{fmtIdent(field_name)});
+ return local;
+}
+
// *(E!T) -> E NOT *E
fn airUnwrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue {
if (o.liveness.isUnused(inst))
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 4224591b0b..7cfbc8da5e 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -350,19 +350,21 @@ pub const Object = struct {
air: Air,
liveness: Liveness,
) !void {
+ const decl = func.owner_decl;
+
var dg: DeclGen = .{
.context = self.context,
.object = self,
.module = module,
- .decl = func.owner_decl,
+ .decl = decl,
.err_msg = null,
.gpa = module.gpa,
};
- const llvm_func = try dg.resolveLLVMFunction(func.owner_decl);
+ const llvm_func = try dg.resolveLlvmFunction(decl);
// This gets the LLVM values from the function and stores them in `dg.args`.
- const fn_param_len = func.owner_decl.ty.fnParamLen();
+ const fn_param_len = decl.ty.fnParamLen();
var args = try dg.gpa.alloc(*const llvm.Value, fn_param_len);
for (args) |*arg, i| {
@@ -400,13 +402,16 @@ pub const Object = struct {
fg.genBody(air.getMainBody()) catch |err| switch (err) {
error.CodegenFail => {
- func.owner_decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, func.owner_decl, dg.err_msg.?);
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
dg.err_msg = null;
return;
},
else => |e| return e,
};
+
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
+ try self.updateDeclExports(module, decl, decl_exports);
}
pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void {
@@ -428,6 +433,38 @@ pub const Object = struct {
else => |e| return e,
};
}
+
+ pub fn updateDeclExports(
+ self: *Object,
+ module: *const Module,
+ decl: *const Module.Decl,
+ exports: []const *Module.Export,
+ ) !void {
+ const llvm_fn = self.llvm_module.getNamedFunction(decl.name).?;
+ const is_extern = decl.val.tag() == .extern_fn;
+ if (is_extern or exports.len != 0) {
+ llvm_fn.setLinkage(.External);
+ llvm_fn.setUnnamedAddr(.False);
+ } else {
+ llvm_fn.setLinkage(.Internal);
+ llvm_fn.setUnnamedAddr(.True);
+ }
+ // TODO LLVM C API does not support deleting aliases. We need to
+ // patch it to support this or figure out how to wrap the C++ API ourselves.
+ // Until then we iterate over existing aliases and make them point
+ // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
+ for (exports) |exp| {
+ const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
+ defer module.gpa.free(exp_name_z);
+
+ if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
+ alias.setAliasee(llvm_fn);
+ } else {
+ const alias = self.llvm_module.addAlias(llvm_fn.typeOf(), llvm_fn, exp_name_z);
+ _ = alias;
+ }
+ }
+ }
};
pub const DeclGen = struct {
@@ -461,21 +498,30 @@ pub const DeclGen = struct {
_ = func_payload;
@panic("TODO llvm backend genDecl function pointer");
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
- _ = try self.resolveLLVMFunction(extern_fn.data);
+ _ = try self.resolveLlvmFunction(extern_fn.data);
} else {
- _ = try self.resolveGlobalDecl(decl);
+ const global = try self.resolveGlobalDecl(decl);
+ assert(decl.has_tv);
+ const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
+ const variable = payload.data;
+ break :init_val variable.init;
+ } else init_val: {
+ global.setGlobalConstant(.True);
+ break :init_val decl.val;
+ };
+
+ const llvm_init = try self.genTypedValue(.{ .ty = decl.ty, .val = init_val });
+ llvm.setInitializer(global, llvm_init);
}
}
/// If the llvm function does not exist, create it
- fn resolveLLVMFunction(self: *DeclGen, func: *Module.Decl) !*const llvm.Value {
- // TODO: do we want to store this in our own datastructure?
- if (self.llvmModule().getNamedFunction(func.name)) |llvm_fn| return llvm_fn;
+ fn resolveLlvmFunction(self: *DeclGen, decl: *Module.Decl) !*const llvm.Value {
+ if (self.llvmModule().getNamedFunction(decl.name)) |llvm_fn| return llvm_fn;
- assert(func.has_tv);
- const zig_fn_type = func.ty;
+ assert(decl.has_tv);
+ const zig_fn_type = decl.ty;
const return_type = zig_fn_type.fnReturnType();
-
const fn_param_len = zig_fn_type.fnParamLen();
const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
@@ -495,9 +541,17 @@ pub const DeclGen = struct {
@intCast(c_uint, fn_param_len),
.False,
);
- const llvm_fn = self.llvmModule().addFunction(func.name, fn_type);
+ const llvm_fn = self.llvmModule().addFunction(decl.name, fn_type);
+
+ const is_extern = decl.val.tag() == .extern_fn;
+ if (!is_extern) {
+ llvm_fn.setLinkage(.Internal);
+ llvm_fn.setUnnamedAddr(.True);
+ }
+
+ // TODO: calling convention, linkage, tsan, etc. see codegen.cpp `make_fn_llvm_value`.
- if (return_type.tag() == .noreturn) {
+ if (return_type.isNoReturn()) {
self.addFnAttr(llvm_fn, "noreturn");
}
@@ -505,24 +559,11 @@ pub const DeclGen = struct {
}
fn resolveGlobalDecl(self: *DeclGen, decl: *Module.Decl) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
- // TODO: do we want to store this in our own datastructure?
- if (self.llvmModule().getNamedGlobal(decl.name)) |val| return val;
-
- assert(decl.has_tv);
-
+ const llvm_module = self.object.llvm_module;
+ if (llvm_module.getNamedGlobal(decl.name)) |val| return val;
// TODO: remove this redundant `llvmType`, it is also called in `genTypedValue`.
const llvm_type = try self.llvmType(decl.ty);
- const global = self.llvmModule().addGlobal(llvm_type, decl.name);
- const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
- const variable = payload.data;
- global.setGlobalConstant(.False);
- break :init_val variable.init;
- } else decl.val;
-
- const llvm_init = try self.genTypedValue(.{ .ty = decl.ty, .val = init_val }, null);
- llvm.setInitializer(global, llvm_init);
-
- return global;
+ return llvm_module.addGlobal(llvm_type, decl.name);
}
fn llvmType(self: *DeclGen, t: Type) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
@@ -534,6 +575,14 @@ pub const DeclGen = struct {
const info = t.intInfo(self.module.getTarget());
return self.context.intType(info.bits);
},
+ .Float => switch (t.floatBits(self.module.getTarget())) {
+ 16 => return self.context.halfType(),
+ 32 => return self.context.floatType(),
+ 64 => return self.context.doubleType(),
+ 80 => return self.context.x86FP80Type(),
+ 128 => return self.context.fp128Type(),
+ else => unreachable,
+ },
.Bool => return self.context.intType(1),
.Pointer => {
if (t.isSlice()) {
@@ -544,7 +593,7 @@ pub const DeclGen = struct {
try self.llvmType(ptr_type),
try self.llvmType(Type.initTag(.usize)),
};
- return self.context.structType(&fields, 2, .False);
+ return self.context.structType(&fields, fields.len, .False);
} else {
const elem_type = try self.llvmType(t.elemType());
return elem_type.pointerType(0);
@@ -552,7 +601,8 @@ pub const DeclGen = struct {
},
.Array => {
const elem_type = try self.llvmType(t.elemType());
- return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
+ const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null);
+ return elem_type.arrayType(@intCast(c_uint, total_len));
},
.Optional => {
if (!t.isPtrLikeOptional()) {
@@ -571,10 +621,14 @@ pub const DeclGen = struct {
.ErrorUnion => {
const error_type = t.errorUnionSet();
const payload_type = t.errorUnionPayload();
+ const llvm_error_type = try self.llvmType(error_type);
if (!payload_type.hasCodeGenBits()) {
- return self.llvmType(error_type);
+ return llvm_error_type;
}
- return self.todo("implement llvmType for error unions", .{});
+ const llvm_payload_type = try self.llvmType(payload_type);
+
+ const fields: [2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type };
+ return self.context.structType(&fields, fields.len, .False);
},
.ErrorSet => {
return self.context.intType(16);
@@ -602,12 +656,13 @@ pub const DeclGen = struct {
llvm_param.* = try self.llvmType(t.fnParamType(i));
}
const is_var_args = t.fnIsVarArgs();
- return llvm.functionType(
+ const llvm_fn_ty = llvm.functionType(
ret_ty,
llvm_params.ptr,
@intCast(c_uint, llvm_params.len),
llvm.Bool.fromBool(is_var_args),
);
+ return llvm_fn_ty.pointerType(0);
},
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
@@ -618,7 +673,6 @@ pub const DeclGen = struct {
.BoundFn => @panic("TODO remove BoundFn from the language"),
- .Float,
.Enum,
.Union,
.Opaque,
@@ -629,19 +683,22 @@ pub const DeclGen = struct {
}
}
- // TODO: figure out a way to remove the FuncGen argument
- fn genTypedValue(self: *DeclGen, tv: TypedValue, fg: ?*FuncGen) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
- const llvm_type = try self.llvmType(tv.ty);
-
- if (tv.val.isUndef())
+ fn genTypedValue(self: *DeclGen, tv: TypedValue) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
+ if (tv.val.isUndef()) {
+ const llvm_type = try self.llvmType(tv.ty);
return llvm_type.getUndef();
+ }
switch (tv.ty.zigTypeTag()) {
- .Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
+ .Bool => {
+ const llvm_type = try self.llvmType(tv.ty);
+ return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
+ },
.Int => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = tv.val.toBigInt(&bigint_space);
+ const llvm_type = try self.llvmType(tv.ty);
if (bigint.eqZero()) return llvm_type.constNull();
if (bigint.limbs.len != 1) {
@@ -653,32 +710,63 @@ pub const DeclGen = struct {
}
return llvm_int;
},
+ .Float => {
+ if (tv.ty.floatBits(self.module.getTarget()) <= 64) {
+ const llvm_ty = try self.llvmType(tv.ty);
+ return llvm_ty.constReal(tv.val.toFloat(f64));
+ }
+ return self.todo("bitcast to f128 from an integer", .{});
+ },
.Pointer => switch (tv.val.tag()) {
.decl_ref => {
- const decl = tv.val.castTag(.decl_ref).?.data;
- const val = try self.resolveGlobalDecl(decl);
-
- const usize_type = try self.llvmType(Type.initTag(.usize));
-
- // TODO: second index should be the index into the memory!
- var indices: [2]*const llvm.Value = .{
- usize_type.constNull(),
- usize_type.constNull(),
- };
-
- // TODO: consider using buildInBoundsGEP2 for opaque pointers
- return fg.?.builder.buildInBoundsGEP(val, &indices, 2, "");
- },
- .ref_val => {
- const elem_value = tv.val.castTag(.ref_val).?.data;
- const elem_type = tv.ty.castPointer().?.data;
- const alloca = fg.?.buildAlloca(try self.llvmType(elem_type));
- _ = fg.?.builder.buildStore(try self.genTypedValue(.{ .ty = elem_type, .val = elem_value }, fg), alloca);
- return alloca;
+ if (tv.ty.isSlice()) {
+ var buf: Type.Payload.ElemType = undefined;
+ const ptr_ty = tv.ty.slicePtrFieldType(&buf);
+ var slice_len: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = tv.val.sliceLen(),
+ };
+ const fields: [2]*const llvm.Value = .{
+ try self.genTypedValue(.{
+ .ty = ptr_ty,
+ .val = tv.val,
+ }),
+ try self.genTypedValue(.{
+ .ty = Type.initTag(.usize),
+ .val = Value.initPayload(&slice_len.base),
+ }),
+ };
+ return self.context.constStruct(&fields, fields.len, .False);
+ } else {
+ const decl = tv.val.castTag(.decl_ref).?.data;
+ decl.alive = true;
+ const val = try self.resolveGlobalDecl(decl);
+ const llvm_type = try self.llvmType(tv.ty);
+ return val.constBitCast(llvm_type);
+ }
},
.variable => {
- const variable = tv.val.castTag(.variable).?.data;
- return self.resolveGlobalDecl(variable.owner_decl);
+ const decl = tv.val.castTag(.variable).?.data.owner_decl;
+ decl.alive = true;
+ const val = try self.resolveGlobalDecl(decl);
+ const llvm_var_type = try self.llvmType(tv.ty);
+ const llvm_type = llvm_var_type.pointerType(0);
+ return val.constBitCast(llvm_type);
+ },
+ .slice => {
+ const slice = tv.val.castTag(.slice).?.data;
+ var buf: Type.Payload.ElemType = undefined;
+ const fields: [2]*const llvm.Value = .{
+ try self.genTypedValue(.{
+ .ty = tv.ty.slicePtrFieldType(&buf),
+ .val = slice.ptr,
+ }),
+ try self.genTypedValue(.{
+ .ty = Type.initTag(.usize),
+ .val = slice.len,
+ }),
+ };
+ return self.context.constStruct(&fields, fields.len, .False);
},
else => |tag| return self.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
},
@@ -689,10 +777,28 @@ pub const DeclGen = struct {
return self.todo("handle other sentinel values", .{});
} else false;
- return self.context.constString(payload.data.ptr, @intCast(c_uint, payload.data.len), llvm.Bool.fromBool(!zero_sentinel));
- } else {
- return self.todo("handle more array values", .{});
+ return self.context.constString(
+ payload.data.ptr,
+ @intCast(c_uint, payload.data.len),
+ llvm.Bool.fromBool(!zero_sentinel),
+ );
+ }
+ if (tv.val.castTag(.array)) |payload| {
+ const gpa = self.gpa;
+ const elem_ty = tv.ty.elemType();
+ const elem_vals = payload.data;
+ const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len);
+ defer gpa.free(llvm_elems);
+ for (elem_vals) |elem_val, i| {
+ llvm_elems[i] = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
+ }
+ const llvm_elem_ty = try self.llvmType(elem_ty);
+ return llvm_elem_ty.constArray(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
}
+ return self.todo("handle more array values", .{});
},
.Optional => {
if (!tv.ty.isPtrLikeOptional()) {
@@ -705,18 +811,87 @@ pub const DeclGen = struct {
llvm_child_type.constNull(),
self.context.intType(1).constNull(),
};
- return self.context.constStruct(&optional_values, 2, .False);
+ return self.context.constStruct(&optional_values, optional_values.len, .False);
} else {
var optional_values: [2]*const llvm.Value = .{
- try self.genTypedValue(.{ .ty = child_type, .val = tv.val }, fg),
+ try self.genTypedValue(.{ .ty = child_type, .val = tv.val }),
self.context.intType(1).constAllOnes(),
};
- return self.context.constStruct(&optional_values, 2, .False);
+ return self.context.constStruct(&optional_values, optional_values.len, .False);
}
} else {
return self.todo("implement const of optional pointer", .{});
}
},
+ .Fn => {
+ const fn_decl = switch (tv.val.tag()) {
+ .extern_fn => tv.val.castTag(.extern_fn).?.data,
+ .function => tv.val.castTag(.function).?.data.owner_decl,
+ .decl_ref => tv.val.castTag(.decl_ref).?.data,
+ else => unreachable,
+ };
+ fn_decl.alive = true;
+ return self.resolveLlvmFunction(fn_decl);
+ },
+ .ErrorSet => {
+ const llvm_ty = try self.llvmType(tv.ty);
+ switch (tv.val.tag()) {
+ .@"error" => {
+ const err_name = tv.val.castTag(.@"error").?.data.name;
+ const kv = try self.module.getErrorValue(err_name);
+ return llvm_ty.constInt(kv.value, .False);
+ },
+ else => {
+ // In this case we are rendering an error union which has a 0 bits payload.
+ return llvm_ty.constNull();
+ },
+ }
+ },
+ .ErrorUnion => {
+ const error_type = tv.ty.errorUnionSet();
+ const payload_type = tv.ty.errorUnionPayload();
+ const is_pl = tv.val.errorUnionIsPayload();
+
+ if (!payload_type.hasCodeGenBits()) {
+ // We use the error type directly as the type.
+ const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
+ return self.genTypedValue(.{ .ty = error_type, .val = err_val });
+ }
+
+ const fields: [2]*const llvm.Value = .{
+ try self.genTypedValue(.{
+ .ty = error_type,
+ .val = if (is_pl) Value.initTag(.zero) else tv.val,
+ }),
+ try self.genTypedValue(.{
+ .ty = payload_type,
+ .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef),
+ }),
+ };
+ return self.context.constStruct(&fields, fields.len, .False);
+ },
+ .Struct => {
+ const fields_len = tv.ty.structFieldCount();
+ const field_vals = tv.val.castTag(.@"struct").?.data;
+ const gpa = self.gpa;
+ const llvm_fields = try gpa.alloc(*const llvm.Value, fields_len);
+ defer gpa.free(llvm_fields);
+ for (llvm_fields) |*llvm_field, i| {
+ llvm_field.* = try self.genTypedValue(.{
+ .ty = tv.ty.structFieldType(i),
+ .val = field_vals[i],
+ });
+ }
+ return self.context.constStruct(
+ llvm_fields.ptr,
+ @intCast(c_uint, llvm_fields.len),
+ .False,
+ );
+ },
+ .ComptimeInt => unreachable,
+ .ComptimeFloat => unreachable,
+ .Type => unreachable,
+ .EnumLiteral => unreachable,
else => return self.todo("implement const of type '{}'", .{tv.ty}),
}
}
@@ -788,12 +963,10 @@ pub const FuncGen = struct {
fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value {
if (self.air.value(inst)) |val| {
- return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self);
+ return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val });
}
const inst_index = Air.refToIndex(inst).?;
- if (self.func_inst_table.get(inst_index)) |value| return value;
-
- return self.todo("implement global llvm values (or the value is not in the func_inst_table table)", .{});
+ return self.func_inst_table.get(inst_index).?;
}
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) error{ OutOfMemory, CodegenFail }!void {
@@ -801,8 +974,19 @@ pub const FuncGen = struct {
for (body) |inst| {
const opt_value: ?*const llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
- .add => try self.airAdd(inst),
- .sub => try self.airSub(inst),
+ .add => try self.airAdd(inst, false),
+ .addwrap => try self.airAdd(inst, true),
+ .sub => try self.airSub(inst, false),
+ .subwrap => try self.airSub(inst, true),
+ .mul => try self.airMul(inst, false),
+ .mulwrap => try self.airMul(inst, true),
+ .div => try self.airDiv(inst),
+ .ptr_add => try self.airPtrAdd(inst),
+ .ptr_sub => try self.airPtrSub(inst),
+
+ .bit_and, .bool_and => try self.airAnd(inst),
+ .bit_or, .bool_or => try self.airOr(inst),
+ .xor => try self.airXor(inst),
.cmp_eq => try self.airCmp(inst, .eq),
.cmp_gt => try self.airCmp(inst, .gt),
@@ -815,20 +999,24 @@ pub const FuncGen = struct {
.is_non_null_ptr => try self.airIsNonNull(inst, true),
.is_null => try self.airIsNull(inst, false),
.is_null_ptr => try self.airIsNull(inst, true),
- .is_non_err => try self.airIsErr(inst, true, false),
- .is_non_err_ptr => try self.airIsErr(inst, true, true),
- .is_err => try self.airIsErr(inst, false, false),
- .is_err_ptr => try self.airIsErr(inst, false, true),
+ .is_non_err => try self.airIsErr(inst, .EQ, false),
+ .is_non_err_ptr => try self.airIsErr(inst, .EQ, true),
+ .is_err => try self.airIsErr(inst, .NE, false),
+ .is_err_ptr => try self.airIsErr(inst, .NE, true),
.alloc => try self.airAlloc(inst),
.arg => try self.airArg(inst),
.bitcast => try self.airBitCast(inst),
+ .bool_to_int=> try self.airBoolToInt(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .switch_br => try self.airSwitchBr(inst),
.breakpoint => try self.airBreakpoint(inst),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.intcast => try self.airIntCast(inst),
+ .trunc => try self.airTrunc(inst),
+ .floatcast => try self.airFloatCast(inst),
.ptrtoint => try self.airPtrToInt(inst),
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
@@ -836,12 +1024,16 @@ pub const FuncGen = struct {
.ret => try self.airRet(inst),
.store => try self.airStore(inst),
.assembly => try self.airAssembly(inst),
- .varptr => try self.airVarPtr(inst),
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
- .slice_elem_val => try self.airSliceElemVal(inst, false),
- .ptr_slice_elem_val => try self.airSliceElemVal(inst, true),
+ .struct_field_ptr => try self.airStructFieldPtr(inst),
+ .struct_field_val => try self.airStructFieldVal(inst),
+
+ .slice_elem_val => try self.airSliceElemVal(inst),
+ .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .ptr_elem_val => try self.airPtrElemVal(inst),
+ .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
.optional_payload => try self.airOptionalPayload(inst, false),
.optional_payload_ptr => try self.airOptionalPayload(inst, true),
@@ -851,12 +1043,17 @@ pub const FuncGen = struct {
.unwrap_errunion_err => try self.airErrUnionErr(inst, false),
.unwrap_errunion_err_ptr => try self.airErrUnionErr(inst, true),
+ .wrap_optional => try self.airWrapOptional(inst),
+ .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
+ .wrap_errunion_err => try self.airWrapErrUnionErr(inst),
+
+ .constant => unreachable,
+ .const_ty => unreachable,
.unreach => self.airUnreach(inst),
.dbg_stmt => blk: {
// TODO: implement debug info
break :blk null;
},
- else => |tag| return self.todo("implement AIR instruction: {}", .{tag}),
// zig fmt: on
};
if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val);
@@ -867,47 +1064,32 @@ pub const FuncGen = struct {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const zig_fn_type = self.air.typeOf(pl_op.operand);
+ const return_type = zig_fn_type.fnReturnType();
+ const llvm_fn = try self.resolveInst(pl_op.operand);
- if (self.air.value(pl_op.operand)) |func_value| {
- const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn|
- extern_fn.data
- else if (func_value.castTag(.function)) |func_payload|
- func_payload.data.owner_decl
- else
- unreachable;
-
- assert(fn_decl.has_tv);
- const zig_fn_type = fn_decl.ty;
- const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl);
+ const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, args.len);
+ defer self.gpa.free(llvm_param_vals);
- const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, args.len);
- defer self.gpa.free(llvm_param_vals);
+ for (args) |arg, i| {
+ llvm_param_vals[i] = try self.resolveInst(arg);
+ }
- for (args) |arg, i| {
- llvm_param_vals[i] = try self.resolveInst(arg);
- }
+ const call = self.builder.buildCall(
+ llvm_fn,
+ llvm_param_vals.ptr,
+ @intCast(c_uint, args.len),
+ "",
+ );
- // TODO: LLVMBuildCall2 handles opaque function pointers, according to llvm docs
- // Do we need that?
- const call = self.builder.buildCall(
- llvm_fn,
- llvm_param_vals.ptr,
- @intCast(c_uint, args.len),
- "",
- );
-
- const return_type = zig_fn_type.fnReturnType();
- if (return_type.tag() == .noreturn) {
- _ = self.builder.buildUnreachable();
- }
+ if (return_type.isNoReturn()) {
+ _ = self.builder.buildUnreachable();
+ }
- // No need to store the LLVM value if the return type is void or noreturn
- if (!return_type.hasCodeGenBits()) return null;
+ // No need to store the LLVM value if the return type is void or noreturn
+ if (!return_type.hasCodeGenBits()) return null;
- return call;
- } else {
- return self.todo("implement calling runtime known function pointer", .{});
- }
+ return call;
}
fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -930,21 +1112,32 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (!inst_ty.isInt())
- if (inst_ty.tag() != .bool)
- return self.todo("implement 'airCmp' for type {}", .{inst_ty});
-
- const is_signed = inst_ty.isSignedInt();
- const operation = switch (op) {
- .eq => .EQ,
- .neq => .NE,
- .lt => @as(llvm.IntPredicate, if (is_signed) .SLT else .ULT),
- .lte => @as(llvm.IntPredicate, if (is_signed) .SLE else .ULE),
- .gt => @as(llvm.IntPredicate, if (is_signed) .SGT else .UGT),
- .gte => @as(llvm.IntPredicate, if (is_signed) .SGE else .UGE),
- };
-
- return self.builder.buildICmp(operation, lhs, rhs, "");
+ switch (self.air.typeOf(bin_op.lhs).zigTypeTag()) {
+ .Int, .Bool, .Pointer, .ErrorSet => {
+ const is_signed = inst_ty.isSignedInt();
+ const operation = switch (op) {
+ .eq => .EQ,
+ .neq => .NE,
+ .lt => @as(llvm.IntPredicate, if (is_signed) .SLT else .ULT),
+ .lte => @as(llvm.IntPredicate, if (is_signed) .SLE else .ULE),
+ .gt => @as(llvm.IntPredicate, if (is_signed) .SGT else .UGT),
+ .gte => @as(llvm.IntPredicate, if (is_signed) .SGE else .UGE),
+ };
+ return self.builder.buildICmp(operation, lhs, rhs, "");
+ },
+ .Float => {
+ const operation: llvm.RealPredicate = switch (op) {
+ .eq => .OEQ,
+ .neq => .UNE,
+ .lt => .OLT,
+ .lte => .OLE,
+ .gt => .OGT,
+ .gte => .OGE,
+ };
+ return self.builder.buildFCmp(operation, lhs, rhs, "");
+ },
+ else => unreachable,
+ }
}
fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1026,6 +1219,11 @@ pub const FuncGen = struct {
return null;
}
+ fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ _ = inst;
+ return self.todo("implement llvm codegen for switch_br", .{});
+ }
+
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
@@ -1040,16 +1238,6 @@ pub const FuncGen = struct {
return null;
}
- fn airVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
- const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const variable = self.air.variables[ty_pl.payload];
- const decl_llvm_value = self.dg.resolveGlobalDecl(variable.owner_decl);
- return decl_llvm_value;
- }
-
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1059,33 +1247,88 @@ pub const FuncGen = struct {
return self.builder.buildExtractValue(operand, index, "");
}
- fn airSliceElemVal(
- self: *FuncGen,
- inst: Air.Inst.Index,
- operand_is_ptr: bool,
- ) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
+ fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const is_volatile = false; // TODO
+ if (!is_volatile and self.liveness.isUnused(inst))
return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
+ const base_ptr = self.builder.buildExtractValue(lhs, 0, "");
+ const indices: [1]*const llvm.Value = .{rhs};
+ const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ return self.builder.buildLoad(ptr, "");
+ }
- const base_ptr = if (!operand_is_ptr) lhs else ptr: {
- const index_type = self.context.intType(32);
- const indices: [2]*const llvm.Value = .{
- index_type.constNull(),
- index_type.constInt(0, .False),
- };
- const ptr_field_ptr = self.builder.buildInBoundsGEP(lhs, &indices, 2, "");
+ fn airPtrSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const is_volatile = false; // TODO
+ if (!is_volatile and self.liveness.isUnused(inst))
+ return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+
+ const base_ptr = ptr: {
+ const ptr_field_ptr = self.builder.buildStructGEP(lhs, 0, "");
break :ptr self.builder.buildLoad(ptr_field_ptr, "");
};
const indices: [1]*const llvm.Value = .{rhs};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, 1, "");
+ const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ return self.builder.buildLoad(ptr, "");
+ }
+
+ fn airPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const is_volatile = false; // TODO
+ if (!is_volatile and self.liveness.isUnused(inst))
+ return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const base_ptr = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const indices: [1]*const llvm.Value = .{rhs};
+ const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ return self.builder.buildLoad(ptr, "");
+ }
+
+ fn airPtrPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const is_volatile = false; // TODO
+ if (!is_volatile and self.liveness.isUnused(inst))
+ return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const base_ptr = self.builder.buildLoad(lhs, "");
+ const indices: [1]*const llvm.Value = .{rhs};
+ const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
return self.builder.buildLoad(ptr, "");
}
+ fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
+ const struct_ptr = try self.resolveInst(struct_field.struct_operand);
+ const field_index = @intCast(c_uint, struct_field.field_index);
+ return self.builder.buildStructGEP(struct_ptr, field_index, "");
+ }
+
+ fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
+ const struct_byval = try self.resolveInst(struct_field.struct_operand);
+ const field_index = @intCast(c_uint, struct_field.field_index);
+ return self.builder.buildExtractValue(struct_byval, field_index, "");
+ }
+
fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1239,7 +1482,7 @@ pub const FuncGen = struct {
index_type.constInt(1, .False),
};
- return self.builder.buildLoad(self.builder.buildInBoundsGEP(operand, &indices, 2, ""), "");
+ return self.builder.buildLoad(self.builder.buildInBoundsGEP(operand, &indices, indices.len, ""), "");
} else {
return self.builder.buildExtractValue(operand, 1, "");
}
@@ -1255,7 +1498,7 @@ pub const FuncGen = struct {
fn airIsErr(
self: *FuncGen,
inst: Air.Inst.Index,
- invert_logic: bool,
+ op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
@@ -1265,16 +1508,22 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(un_op);
const err_union_ty = self.air.typeOf(un_op);
const payload_ty = err_union_ty.errorUnionPayload();
+ const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror));
+ const zero = err_set_ty.constNull();
if (!payload_ty.hasCodeGenBits()) {
const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand;
- const op: llvm.IntPredicate = if (invert_logic) .EQ else .NE;
- const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror));
- const zero = err_set_ty.constNull();
return self.builder.buildICmp(op, loaded, zero, "");
}
- return self.todo("implement 'airIsErr' for error unions with nonzero payload", .{});
+ if (operand_is_ptr) {
+ const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
+ const loaded = self.builder.buildLoad(err_field_ptr, "");
+ return self.builder.buildICmp(op, loaded, zero, "");
+ }
+
+ const loaded = self.builder.buildExtractValue(operand, 0, "");
+ return self.builder.buildICmp(op, loaded, zero, "");
}
fn airOptionalPayload(
@@ -1319,9 +1568,11 @@ pub const FuncGen = struct {
return null;
}
- _ = operand;
- _ = operand_is_ptr;
- return self.todo("implement 'airErrUnionPayload' for type {}", .{self.air.typeOf(ty_op.operand)});
+ if (operand_is_ptr) {
+ return self.builder.buildStructGEP(operand, 1, "");
+ }
+
+ return self.builder.buildExtractValue(operand, 1, "");
}
fn airErrUnionErr(
@@ -1332,42 +1583,152 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst))
return null;
- _ = operand_is_ptr;
- return self.todo("implement 'airErrUnionErr'", .{});
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const operand_ty = self.air.typeOf(ty_op.operand);
+
+ const payload_ty = operand_ty.errorUnionPayload();
+ if (!payload_ty.hasCodeGenBits()) {
+ if (!operand_is_ptr) return operand;
+ return self.builder.buildLoad(operand, "");
+ }
+
+ if (operand_is_ptr) {
+ const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
+ return self.builder.buildLoad(err_field_ptr, "");
+ }
+
+ return self.builder.buildExtractValue(operand, 0, "");
+ }
+
+ fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ return self.todo("implement llvm codegen for 'airWrapOptional'", .{});
+ }
+
+ fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ return self.todo("implement llvm codegen for 'airWrapErrUnionPayload'", .{});
+ }
+
+ fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ return self.todo("implement llvm codegen for 'airWrapErrUnionErr'", .{});
}
- fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAdd(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
+
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (!inst_ty.isInt())
- return self.todo("implement 'airAdd' for type {}", .{inst_ty});
+ if (inst_ty.isFloat()) return self.builder.buildFAdd(lhs, rhs, "");
+ if (wrap) return self.builder.buildAdd(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, "");
+ return self.builder.buildNUWAdd(lhs, rhs, "");
+ }
- return if (inst_ty.isSignedInt())
- self.builder.buildNSWAdd(lhs, rhs, "")
- else
- self.builder.buildNUWAdd(lhs, rhs, "");
+ fn airSub(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isFloat()) return self.builder.buildFSub(lhs, rhs, "");
+ if (wrap) return self.builder.buildSub(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, "");
+ return self.builder.buildNUWSub(lhs, rhs, "");
}
- fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airMul(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
+
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (!inst_ty.isInt())
- return self.todo("implement 'airSub' for type {}", .{inst_ty});
+ if (inst_ty.isFloat()) return self.builder.buildFMul(lhs, rhs, "");
+ if (wrap) return self.builder.buildMul(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, "");
+ return self.builder.buildNUWMul(lhs, rhs, "");
+ }
- return if (inst_ty.isSignedInt())
- self.builder.buildNSWSub(lhs, rhs, "")
- else
- self.builder.buildNUWSub(lhs, rhs, "");
+ fn airDiv(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
+ return self.builder.buildUDiv(lhs, rhs, "");
+ }
+
+ fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const base_ptr = try self.resolveInst(bin_op.lhs);
+ const offset = try self.resolveInst(bin_op.rhs);
+ const indices: [1]*const llvm.Value = .{offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
+
+ fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const base_ptr = try self.resolveInst(bin_op.lhs);
+ const offset = try self.resolveInst(bin_op.rhs);
+ const negative_offset = self.builder.buildNeg(offset, "");
+ const indices: [1]*const llvm.Value = .{negative_offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
+
+ fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ return self.builder.buildAnd(lhs, rhs, "");
+ }
+
+ fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ return self.builder.buildOr(lhs, rhs, "");
+ }
+
+ fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ return self.builder.buildXor(lhs, rhs, "");
}
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1384,6 +1745,24 @@ pub const FuncGen = struct {
return self.builder.buildIntCast2(operand, try self.dg.llvmType(inst_ty), llvm.Bool.fromBool(signed), "");
}
+ fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ return self.builder.buildTrunc(operand, dest_llvm_ty, "");
+ }
+
+ fn airFloatCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ // TODO split floatcast AIR into float_widen and float_shorten
+ return self.todo("implement 'airFloatCast'", .{});
+ }
+
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1406,6 +1785,15 @@ pub const FuncGen = struct {
return self.builder.buildBitCast(operand, dest_type, "");
}
+ fn airBoolToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ return operand;
+ }
+
fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
@@ -1474,8 +1862,8 @@ pub const FuncGen = struct {
fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
_ = inst;
- const llvn_fn = self.getIntrinsic("llvm.debugtrap");
- _ = self.builder.buildCall(llvn_fn, undefined, 0, "");
+ const llvm_fn = self.getIntrinsic("llvm.debugtrap");
+ _ = self.builder.buildCall(llvm_fn, undefined, 0, "");
return null;
}
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 3b76361030..4bb8a4a18b 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -31,6 +31,21 @@ pub const Context = opaque {
pub const intType = LLVMIntTypeInContext;
extern fn LLVMIntTypeInContext(C: *const Context, NumBits: c_uint) *const Type;
+ pub const halfType = LLVMHalfTypeInContext;
+ extern fn LLVMHalfTypeInContext(C: *const Context) *const Type;
+
+ pub const floatType = LLVMFloatTypeInContext;
+ extern fn LLVMFloatTypeInContext(C: *const Context) *const Type;
+
+ pub const doubleType = LLVMDoubleTypeInContext;
+ extern fn LLVMDoubleTypeInContext(C: *const Context) *const Type;
+
+ pub const x86FP80Type = LLVMX86FP80TypeInContext;
+ extern fn LLVMX86FP80TypeInContext(C: *const Context) *const Type;
+
+ pub const fp128Type = LLVMFP128TypeInContext;
+ extern fn LLVMFP128TypeInContext(C: *const Context) *const Type;
+
pub const voidType = LLVMVoidTypeInContext;
extern fn LLVMVoidTypeInContext(C: *const Context) *const Type;
@@ -49,7 +64,12 @@ pub const Context = opaque {
extern fn LLVMConstStringInContext(C: *const Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: Bool) *const Value;
pub const constStruct = LLVMConstStructInContext;
- extern fn LLVMConstStructInContext(C: *const Context, ConstantVals: [*]*const Value, Count: c_uint, Packed: Bool) *const Value;
+ extern fn LLVMConstStructInContext(
+ C: *const Context,
+ ConstantVals: [*]const *const Value,
+ Count: c_uint,
+ Packed: Bool,
+ ) *const Value;
pub const createBasicBlock = LLVMCreateBasicBlockInContext;
extern fn LLVMCreateBasicBlockInContext(C: *const Context, Name: [*:0]const u8) *const BasicBlock;
@@ -82,6 +102,34 @@ pub const Value = opaque {
pub const setGlobalConstant = LLVMSetGlobalConstant;
extern fn LLVMSetGlobalConstant(GlobalVar: *const Value, IsConstant: Bool) void;
+
+ pub const setLinkage = LLVMSetLinkage;
+ extern fn LLVMSetLinkage(Global: *const Value, Linkage: Linkage) void;
+
+ pub const setUnnamedAddr = LLVMSetUnnamedAddr;
+ extern fn LLVMSetUnnamedAddr(Global: *const Value, HasUnnamedAddr: Bool) void;
+
+ pub const deleteGlobal = LLVMDeleteGlobal;
+ extern fn LLVMDeleteGlobal(GlobalVar: *const Value) void;
+
+ pub const getNextGlobalAlias = LLVMGetNextGlobalAlias;
+ extern fn LLVMGetNextGlobalAlias(GA: *const Value) *const Value;
+
+ pub const getAliasee = LLVMAliasGetAliasee;
+ extern fn LLVMAliasGetAliasee(Alias: *const Value) *const Value;
+
+ pub const setAliasee = LLVMAliasSetAliasee;
+ extern fn LLVMAliasSetAliasee(Alias: *const Value, Aliasee: *const Value) void;
+
+ pub const constInBoundsGEP = LLVMConstInBoundsGEP;
+ extern fn LLVMConstInBoundsGEP(
+ ConstantVal: *const Value,
+ ConstantIndices: [*]const *const Value,
+ NumIndices: c_uint,
+ ) *const Value;
+
+ pub const constBitCast = LLVMConstBitCast;
+ extern fn LLVMConstBitCast(ConstantVal: *const Value, ToType: *const Type) *const Value;
};
pub const Type = opaque {
@@ -94,8 +142,11 @@ pub const Type = opaque {
pub const constInt = LLVMConstInt;
extern fn LLVMConstInt(IntTy: *const Type, N: c_ulonglong, SignExtend: Bool) *const Value;
+ pub const constReal = LLVMConstReal;
+ extern fn LLVMConstReal(RealTy: *const Type, N: f64) *const Value;
+
pub const constArray = LLVMConstArray;
- extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: ?[*]*const Value, Length: c_uint) *const Value;
+ extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: [*]*const Value, Length: c_uint) *const Value;
pub const getUndef = LLVMGetUndef;
extern fn LLVMGetUndef(Ty: *const Type) *const Value;
@@ -145,6 +196,30 @@ pub const Module = opaque {
pub const dump = LLVMDumpModule;
extern fn LLVMDumpModule(M: *const Module) void;
+
+ pub const getFirstGlobalAlias = LLVMGetFirstGlobalAlias;
+ extern fn LLVMGetFirstGlobalAlias(M: *const Module) *const Value;
+
+ pub const getLastGlobalAlias = LLVMGetLastGlobalAlias;
+ extern fn LLVMGetLastGlobalAlias(M: *const Module) *const Value;
+
+ pub const addAlias = LLVMAddAlias;
+ extern fn LLVMAddAlias(
+ M: *const Module,
+ Ty: *const Type,
+ Aliasee: *const Value,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const getNamedGlobalAlias = LLVMGetNamedGlobalAlias;
+ extern fn LLVMGetNamedGlobalAlias(
+ M: *const Module,
+ /// Empirically, LLVM will call strlen() on `Name` and so it
+ /// must be both null terminated and also have `NameLen` set
+ /// to the size.
+ Name: [*:0]const u8,
+ NameLen: usize,
+ ) ?*const Value;
};
pub const lookupIntrinsicID = LLVMLookupIntrinsicID;
@@ -249,21 +324,66 @@ pub const Builder = opaque {
pub const buildLoad = LLVMBuildLoad;
extern fn LLVMBuildLoad(*const Builder, PointerVal: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildNeg = LLVMBuildNeg;
+ extern fn LLVMBuildNeg(*const Builder, V: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildNot = LLVMBuildNot;
extern fn LLVMBuildNot(*const Builder, V: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildFAdd = LLVMBuildFAdd;
+ extern fn LLVMBuildFAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildAdd = LLVMBuildAdd;
+ extern fn LLVMBuildAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildNSWAdd = LLVMBuildNSWAdd;
extern fn LLVMBuildNSWAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildNUWAdd = LLVMBuildNUWAdd;
extern fn LLVMBuildNUWAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildFSub = LLVMBuildFSub;
+ extern fn LLVMBuildFSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildSub = LLVMBuildSub;
+ extern fn LLVMBuildSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildNSWSub = LLVMBuildNSWSub;
extern fn LLVMBuildNSWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildNUWSub = LLVMBuildNUWSub;
extern fn LLVMBuildNUWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildFMul = LLVMBuildFMul;
+ extern fn LLVMBuildFMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildMul = LLVMBuildMul;
+ extern fn LLVMBuildMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNSWMul = LLVMBuildNSWMul;
+ extern fn LLVMBuildNSWMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNUWMul = LLVMBuildNUWMul;
+ extern fn LLVMBuildNUWMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildUDiv = LLVMBuildUDiv;
+ extern fn LLVMBuildUDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildSDiv = LLVMBuildSDiv;
+ extern fn LLVMBuildSDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildFDiv = LLVMBuildFDiv;
+ extern fn LLVMBuildFDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildAnd = LLVMBuildAnd;
+ extern fn LLVMBuildAnd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildOr = LLVMBuildOr;
+ extern fn LLVMBuildOr(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildXor = LLVMBuildXor;
+ extern fn LLVMBuildXor(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildIntCast2 = LLVMBuildIntCast2;
extern fn LLVMBuildIntCast2(*const Builder, Val: *const Value, DestTy: *const Type, IsSigned: Bool, Name: [*:0]const u8) *const Value;
@@ -279,9 +399,22 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
+ pub const buildInBoundsGEP2 = LLVMBuildInBoundsGEP2;
+ extern fn LLVMBuildInBoundsGEP2(
+ B: *const Builder,
+ Ty: *const Type,
+ Pointer: *const Value,
+ Indices: [*]const *const Value,
+ NumIndices: c_uint,
+ Name: [*:0]const u8,
+ ) *const Value;
+
pub const buildICmp = LLVMBuildICmp;
extern fn LLVMBuildICmp(*const Builder, Op: IntPredicate, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildFCmp = LLVMBuildFCmp;
+ extern fn LLVMBuildFCmp(*const Builder, Op: RealPredicate, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildBr = LLVMBuildBr;
extern fn LLVMBuildBr(*const Builder, Dest: *const BasicBlock) *const Value;
@@ -292,13 +425,39 @@ pub const Builder = opaque {
extern fn LLVMBuildPhi(*const Builder, Ty: *const Type, Name: [*:0]const u8) *const Value;
pub const buildExtractValue = LLVMBuildExtractValue;
- extern fn LLVMBuildExtractValue(*const Builder, AggVal: *const Value, Index: c_uint, Name: [*:0]const u8) *const Value;
+ extern fn LLVMBuildExtractValue(
+ *const Builder,
+ AggVal: *const Value,
+ Index: c_uint,
+ Name: [*:0]const u8,
+ ) *const Value;
pub const buildPtrToInt = LLVMBuildPtrToInt;
- extern fn LLVMBuildPtrToInt(*const Builder, Val: *const Value, DestTy: *const Type, Name: [*:0]const u8) *const Value;
+ extern fn LLVMBuildPtrToInt(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildStructGEP = LLVMBuildStructGEP;
+ extern fn LLVMBuildStructGEP(
+ B: *const Builder,
+ Pointer: *const Value,
+ Idx: c_uint,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildTrunc = LLVMBuildTrunc;
+ extern fn LLVMBuildTrunc(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
};
-pub const IntPredicate = enum(c_int) {
+pub const IntPredicate = enum(c_uint) {
EQ = 32,
NE = 33,
UGT = 34,
@@ -311,6 +470,23 @@ pub const IntPredicate = enum(c_int) {
SLE = 41,
};
+pub const RealPredicate = enum(c_uint) {
+ OEQ = 1,
+ OGT = 2,
+ OGE = 3,
+ OLT = 4,
+ OLE = 5,
+ ONE = 6,
+ ORD = 7,
+ UNO = 8,
+ UEQ = 9,
+ UGT = 10,
+ UGE = 11,
+ ULT = 12,
+ ULE = 13,
+ UNE = 14,
+};
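A minimal sketch pairing these predicates with buildFCmp; the `builder`, `lhs`, and `rhs` handles are assumed to already exist and are not part of this diff:

    // Ordered less-than comparison of two floating point values.
    const is_lt = builder.buildFCmp(.OLT, lhs, rhs, "");
    _ = is_lt;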
+
pub const BasicBlock = opaque {
pub const deleteBasicBlock = LLVMDeleteBasicBlock;
extern fn LLVMDeleteBasicBlock(BB: *const BasicBlock) void;
@@ -715,3 +891,23 @@ extern fn ZigLLVMWriteImportLibrary(
output_lib_path: [*c]const u8,
kill_at: bool,
) bool;
+
+pub const Linkage = enum(c_uint) {
+ External,
+ AvailableExternally,
+ LinkOnceAny,
+ LinkOnceODR,
+ LinkOnceODRAutoHide,
+ WeakAny,
+ WeakODR,
+ Appending,
+ Internal,
+ Private,
+ DLLImport,
+ DLLExport,
+ ExternalWeak,
+ Ghost,
+ Common,
+ LinkerPrivate,
+ LinkerPrivateWeak,
+};
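These values mirror the LLVM-C LLVMLinkage enumeration. A hedged sketch of how a binding that consumes them could look; LLVMSetLinkage is part of the LLVM-C API, but it is not declared anywhere in this hunk, so treat the wrapper below as an assumption rather than part of the commit:

    pub const setLinkage = LLVMSetLinkage;
    extern fn LLVMSetLinkage(Global: *const Value, Linkage: Linkage) void;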
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index ca0d53988d..4814ba0b55 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -590,8 +590,8 @@ pub const Context = struct {
.Pointer,
.ErrorSet,
=> wasm.Valtype.i32,
- .Struct, .ErrorUnion => unreachable, // Multi typed, must be handled individually.
- else => self.fail("TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}),
+ .Struct, .ErrorUnion, .Optional => unreachable, // Multi typed, must be handled individually.
+ else => |tag| self.fail("TODO - Wasm valtype for type '{s}'", .{tag}),
};
}
@@ -634,7 +634,7 @@ pub const Context = struct {
// for each struct field, generate a local
const struct_data: *Module.Struct = ty.castTag(.@"struct").?.data;
const fields_len = @intCast(u32, struct_data.fields.count());
- try self.locals.ensureCapacity(self.gpa, self.locals.items.len + fields_len);
+ try self.locals.ensureUnusedCapacity(self.gpa, fields_len);
for (struct_data.fields.values()) |*value| {
const val_type = try self.genValtype(value.ty);
self.locals.appendAssumeCapacity(val_type);
@@ -653,7 +653,7 @@ pub const Context = struct {
// The first local is also used to find the index of the error and payload.
//
// TODO: Add support where the payload is a type that contains multiple locals such as a struct.
- try self.locals.ensureCapacity(self.gpa, self.locals.items.len + 2);
+ try self.locals.ensureUnusedCapacity(self.gpa, 2);
self.locals.appendAssumeCapacity(wasm.valtype(.i32)); // error values are always i32
self.locals.appendAssumeCapacity(val_type);
self.local_index += 2;
@@ -663,6 +663,23 @@ pub const Context = struct {
.count = 2,
} };
},
+ .Optional => {
+ var opt_buf: Type.Payload.ElemType = undefined;
+ const child_type = ty.optionalChild(&opt_buf);
+ if (ty.isPtrLikeOptional()) {
+ return self.fail("TODO: wasm optional pointer", .{});
+ }
+
+ try self.locals.ensureUnusedCapacity(self.gpa, 2);
+ self.locals.appendAssumeCapacity(wasm.valtype(.i32)); // optional 'tag' for null-checking is always i32
+ self.locals.appendAssumeCapacity(try self.genValtype(child_type));
+ self.local_index += 2;
+
+ return WValue{ .multi_value = .{
+ .index = initial_index,
+ .count = 2,
+ } };
+ },
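To summarize the layout chosen above for a non-pointer-like optional (an editorial note, not code from the commit):

    // local at multi_value.index:     i32 null-tag (0 = has payload, 1 = null)
    // local at multi_value.index + 1: the payload, using the child type's valtype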
else => {
const valtype = try self.genValtype(ty);
try self.locals.append(self.gpa, valtype);
@@ -754,22 +771,21 @@ pub const Context = struct {
}
/// Generates the wasm bytecode for the declaration belonging to `Context`
- pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result {
- switch (typed_value.ty.zigTypeTag()) {
+ pub fn gen(self: *Context, ty: Type, val: Value) InnerError!Result {
+ switch (ty.zigTypeTag()) {
.Fn => {
try self.genFunctype();
- if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions
+ if (val.tag() == .extern_fn) {
+ return Result.appended; // don't need code body for extern functions
+ }
return self.fail("TODO implement wasm codegen for function pointers", .{});
},
.Array => {
- if (typed_value.val.castTag(.bytes)) |payload| {
- if (typed_value.ty.sentinel()) |sentinel| {
+ if (val.castTag(.bytes)) |payload| {
+ if (ty.sentinel()) |sentinel| {
try self.code.appendSlice(payload.data);
- switch (try self.gen(.{
- .ty = typed_value.ty.elemType(),
- .val = sentinel,
- })) {
+ switch (try self.gen(ty.elemType(), sentinel)) {
.appended => return Result.appended,
.externally_managed => |data| {
try self.code.appendSlice(data);
@@ -781,13 +797,17 @@ pub const Context = struct {
} else return self.fail("TODO implement gen for more kinds of arrays", .{});
},
.Int => {
- const info = typed_value.ty.intInfo(self.target);
+ const info = ty.intInfo(self.target);
if (info.bits == 8 and info.signedness == .unsigned) {
- const int_byte = typed_value.val.toUnsignedInt();
+ const int_byte = val.toUnsignedInt();
try self.code.append(@intCast(u8, int_byte));
return Result.appended;
}
- return self.fail("TODO: Implement codegen for int type: '{}'", .{typed_value.ty});
+ return self.fail("TODO: Implement codegen for int type: '{}'", .{ty});
+ },
+ .Enum => {
+ try self.emitConstant(val, ty);
+ return Result.appended;
},
else => |tag| return self.fail("TODO: Implement zig type codegen for type: '{s}'", .{tag}),
}
@@ -797,8 +817,11 @@ pub const Context = struct {
const air_tags = self.air.instructions.items(.tag);
return switch (air_tags[inst]) {
.add => self.airBinOp(inst, .add),
+ .addwrap => self.airWrapBinOp(inst, .add),
.sub => self.airBinOp(inst, .sub),
+ .subwrap => self.airWrapBinOp(inst, .sub),
.mul => self.airBinOp(inst, .mul),
+ .mulwrap => self.airWrapBinOp(inst, .mul),
.div => self.airBinOp(inst, .div),
.bit_and => self.airBinOp(inst, .@"and"),
.bit_or => self.airBinOp(inst, .@"or"),
@@ -823,8 +846,16 @@ pub const Context = struct {
.cond_br => self.airCondBr(inst),
.constant => unreachable,
.dbg_stmt => WValue.none,
+ .intcast => self.airIntcast(inst),
+
.is_err => self.airIsErr(inst, .i32_ne),
.is_non_err => self.airIsErr(inst, .i32_eq),
+
+ .is_null => self.airIsNull(inst, .i32_ne),
+ .is_non_null => self.airIsNull(inst, .i32_eq),
+ .is_null_ptr => self.airIsNull(inst, .i32_ne),
+ .is_non_null_ptr => self.airIsNull(inst, .i32_eq),
+
.load => self.airLoad(inst),
.loop => self.airLoop(inst),
.not => self.airNot(inst),
@@ -833,8 +864,13 @@ pub const Context = struct {
.struct_field_ptr => self.airStructFieldPtr(inst),
.switch_br => self.airSwitchBr(inst),
.unreach => self.airUnreachable(inst),
+ .wrap_optional => self.airWrapOptional(inst),
+
.unwrap_errunion_payload => self.airUnwrapErrUnionPayload(inst),
.wrap_errunion_payload => self.airWrapErrUnionPayload(inst),
+
+ .optional_payload => self.airOptionalPayload(inst),
+ .optional_payload_ptr => self.airOptionalPayload(inst),
else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
};
}
@@ -919,6 +955,22 @@ pub const Context = struct {
try leb.writeULEB128(writer, multi_value.index + i - 1);
}
},
+ .local => {
+ // This can occur when we wrap a single value into a multi-value,
+ // such as wrapping a non-optional value into an optional.
+ // This means we must zero the null-tag and set the payload.
+ assert(multi_value.count == 2);
+ // set null-tag
+ try writer.writeByte(wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 0));
+ try writer.writeByte(wasm.opcode(.local_set));
+ try leb.writeULEB128(writer, multi_value.index);
+
+ // set payload
+ try self.emitWValue(rhs);
+ try writer.writeByte(wasm.opcode(.local_set));
+ try leb.writeULEB128(writer, multi_value.index + 1);
+ },
else => unreachable,
},
.local => |local| {
@@ -969,7 +1021,63 @@ pub const Context = struct {
return WValue{ .code_offset = offset };
}
- fn emitConstant(self: *Context, value: Value, ty: Type) InnerError!void {
+ fn airWrapBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = self.resolveInst(bin_op.lhs);
+ const rhs = self.resolveInst(bin_op.rhs);
+
+ // it's possible for lhs and/or rhs to return an offset as well,
+ // in which case we return the first offset occurrence we find.
+ const offset = blk: {
+ if (lhs == .code_offset) break :blk lhs.code_offset;
+ if (rhs == .code_offset) break :blk rhs.code_offset;
+ break :blk self.code.items.len;
+ };
+
+ try self.emitWValue(lhs);
+ try self.emitWValue(rhs);
+
+ const bin_ty = self.air.typeOf(bin_op.lhs);
+ const opcode: wasm.Opcode = buildOpcode(.{
+ .op = op,
+ .valtype1 = try self.typeToValtype(bin_ty),
+ .signedness = if (bin_ty.isSignedInt()) .signed else .unsigned,
+ });
+ try self.code.append(wasm.opcode(opcode));
+
+ const int_info = bin_ty.intInfo(self.target);
+ const bitsize = int_info.bits;
+ const is_signed = int_info.signedness == .signed;
+ // if the target bit size is below 32, or above 32 but below 64, we perform
+ // result & ((1 << N) - 1), where N = bitsize, or bitsize - 1 for signed integers.
+ if (bitsize != 32 and bitsize < 64) {
+ // first check if we can use a single instruction;
+ // wasm provides sign-extension instructions for signed 8- and 16-bit integers.
+ // For arbitrary integer sizes, we fall back to the masking algorithm above.
+ if (is_signed and bitsize == 8) {
+ try self.code.append(wasm.opcode(.i32_extend8_s));
+ } else if (is_signed and bitsize == 16) {
+ try self.code.append(wasm.opcode(.i32_extend16_s));
+ } else {
+ const result = (@as(u64, 1) << @intCast(u6, bitsize - @boolToInt(is_signed))) - 1;
+ if (bitsize < 32) {
+ try self.code.append(wasm.opcode(.i32_const));
+ try leb.writeILEB128(self.code.writer(), @bitCast(i32, @intCast(u32, result)));
+ try self.code.append(wasm.opcode(.i32_and));
+ } else {
+ try self.code.append(wasm.opcode(.i64_const));
+ try leb.writeILEB128(self.code.writer(), @bitCast(i64, result));
+ try self.code.append(wasm.opcode(.i64_and));
+ }
+ }
+ } else if (int_info.bits > 64) {
+ return self.fail("TODO wasm: Integer wrapping for bitsizes larger than 64", .{});
+ }
+
+ return WValue{ .code_offset = offset };
+ }
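Two worked instances of the masking rule used in airWrapBinOp, for illustration only:

    // wrapping to u8 (unsigned, N = 8):    result & ((1 << 8) - 1) == result & 0xFF
    // wrapping to i7 (signed,  N = 7 - 1): result & ((1 << 6) - 1) == result & 0x3F
    // signed 8- and 16-bit results skip the mask and use i32.extend8_s / i32.extend16_s instead.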
+
+ fn emitConstant(self: *Context, val: Value, ty: Type) InnerError!void {
const writer = self.code.writer();
switch (ty.zigTypeTag()) {
.Int => {
@@ -982,10 +1090,10 @@ pub const Context = struct {
const int_info = ty.intInfo(self.target);
// write constant
switch (int_info.signedness) {
- .signed => try leb.writeILEB128(writer, value.toSignedInt()),
+ .signed => try leb.writeILEB128(writer, val.toSignedInt()),
.unsigned => switch (int_info.bits) {
- 0...32 => try leb.writeILEB128(writer, @bitCast(i32, @intCast(u32, value.toUnsignedInt()))),
- 33...64 => try leb.writeILEB128(writer, @bitCast(i64, value.toUnsignedInt())),
+ 0...32 => try leb.writeILEB128(writer, @bitCast(i32, @intCast(u32, val.toUnsignedInt()))),
+ 33...64 => try leb.writeILEB128(writer, @bitCast(i64, val.toUnsignedInt())),
else => |bits| return self.fail("Wasm TODO: emitConstant for integer with {d} bits", .{bits}),
},
}
@@ -994,7 +1102,7 @@ pub const Context = struct {
// write opcode
try writer.writeByte(wasm.opcode(.i32_const));
// write constant
- try leb.writeILEB128(writer, value.toSignedInt());
+ try leb.writeILEB128(writer, val.toSignedInt());
},
.Float => {
// write opcode
@@ -1005,14 +1113,15 @@ pub const Context = struct {
try writer.writeByte(wasm.opcode(opcode));
// write constant
switch (ty.floatBits(self.target)) {
- 0...32 => try writer.writeIntLittle(u32, @bitCast(u32, value.toFloat(f32))),
- 64 => try writer.writeIntLittle(u64, @bitCast(u64, value.toFloat(f64))),
+ 0...32 => try writer.writeIntLittle(u32, @bitCast(u32, val.toFloat(f32))),
+ 64 => try writer.writeIntLittle(u64, @bitCast(u64, val.toFloat(f64))),
else => |bits| return self.fail("Wasm TODO: emitConstant for float with {d} bits", .{bits}),
}
},
.Pointer => {
- if (value.castTag(.decl_ref)) |payload| {
+ if (val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
+ decl.alive = true;
// offset into the offset table within the 'data' section
const ptr_width = self.target.cpu.arch.ptrBitWidth() / 8;
@@ -1024,11 +1133,11 @@ pub const Context = struct {
try writer.writeByte(wasm.opcode(.i32_load));
try leb.writeULEB128(writer, @as(u32, 0));
try leb.writeULEB128(writer, @as(u32, 0));
- } else return self.fail("Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()});
+ } else return self.fail("Wasm TODO: emitConstant for other const pointer tag {s}", .{val.tag()});
},
.Void => {},
.Enum => {
- if (value.castTag(.enum_field_index)) |field_index| {
+ if (val.castTag(.enum_field_index)) |field_index| {
switch (ty.tag()) {
.enum_simple => {
try writer.writeByte(wasm.opcode(.i32_const));
@@ -1049,21 +1158,27 @@ pub const Context = struct {
} else {
var int_tag_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&int_tag_buffer);
- try self.emitConstant(value, int_tag_ty);
+ try self.emitConstant(val, int_tag_ty);
}
},
.ErrorSet => {
- const error_index = self.global_error_set.get(value.getError().?).?;
+ const error_index = self.global_error_set.get(val.getError().?).?;
try writer.writeByte(wasm.opcode(.i32_const));
try leb.writeULEB128(writer, error_index);
},
.ErrorUnion => {
- const data = value.castTag(.error_union).?.data;
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
- if (value.getError()) |_| {
- // write the error value
- try self.emitConstant(data, error_type);
+ if (val.castTag(.eu_payload)) |pl| {
+ const payload_val = pl.data;
+ // no error, so write a '0' const
+ try writer.writeByte(wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 0));
+ // after the error code, we emit the payload
+ try self.emitConstant(payload_val, payload_type);
+ } else {
+ // write the error val
+ try self.emitConstant(val, error_type);
// no payload, so write a '0' const
const opcode: wasm.Opcode = buildOpcode(.{
@@ -1072,12 +1187,31 @@ pub const Context = struct {
});
try writer.writeByte(wasm.opcode(opcode));
try leb.writeULEB128(writer, @as(u32, 0));
- } else {
- // no error, so write a '0' const
+ }
+ },
+ .Optional => {
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_type = ty.optionalChild(&buf);
+ if (ty.isPtrLikeOptional()) {
+ return self.fail("Wasm TODO: emitConstant for optional pointer", .{});
+ }
+
+ // When the constant is 'null', set the is_null local to '1'
+ // and the payload to '0'.
+ if (val.tag() == .null_value) {
try writer.writeByte(wasm.opcode(.i32_const));
+ try leb.writeILEB128(writer, @as(i32, 1));
+
+ const opcode: wasm.Opcode = buildOpcode(.{
+ .op = .@"const",
+ .valtype1 = try self.typeToValtype(payload_type),
+ });
+ try writer.writeByte(wasm.opcode(opcode));
try leb.writeULEB128(writer, @as(u32, 0));
- // after the error code, we emit the payload
- try self.emitConstant(data, payload_type);
+ } else {
+ try writer.writeByte(wasm.opcode(.i32_const));
+ try leb.writeILEB128(writer, @as(i32, 0));
+ try self.emitConstant(val, payload_type);
}
},
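To make the encoding concrete, the constants this arm emits for a hypothetical `?u8` (assuming the non-pointer-like lowering above):

    // ?u8 null:    i32.const 1 (null-tag), i32.const 0 (payload placeholder)
    // ?u8 value 5: i32.const 0 (null-tag), i32.const 5 (payload)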
else => |zig_type| return self.fail("Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}),
@@ -1085,7 +1219,7 @@ pub const Context = struct {
}
/// Returns a `Value` as a signed 32 bit value.
- /// It's illegale to provide a value with a type that cannot be represented
+ /// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
fn valueAsI32(self: Context, val: Value, ty: Type) i32 {
switch (ty.zigTypeTag()) {
@@ -1180,7 +1314,6 @@ pub const Context = struct {
const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const writer = self.code.writer();
-
// TODO: Handle death instructions for then and else body
// insert blocks at the position of `offset` so
@@ -1306,7 +1439,7 @@ pub const Context = struct {
fn airStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload);
- const struct_ptr = self.resolveInst(extra.data.struct_ptr);
+ const struct_ptr = self.resolveInst(extra.data.struct_operand);
return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, extra.data.field_index) };
}
@@ -1490,4 +1623,60 @@ pub const Context = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
return self.resolveInst(ty_op.operand);
}
+
+ fn airIntcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const ty = self.air.getRefType(ty_op.ty);
+ const operand = self.resolveInst(ty_op.operand);
+ const ref_ty = self.air.typeOf(ty_op.operand);
+ const ref_info = ref_ty.intInfo(self.target);
+ const op_bits = ref_info.bits;
+ const wanted_bits = ty.intInfo(self.target).bits;
+
+ try self.emitWValue(operand);
+ if (op_bits > 32 and wanted_bits <= 32) {
+ try self.code.append(wasm.opcode(.i32_wrap_i64));
+ } else if (op_bits <= 32 and wanted_bits > 32) {
+ try self.code.append(wasm.opcode(switch (ref_info.signedness) {
+ .signed => .i64_extend_i32_s,
+ .unsigned => .i64_extend_i32_u,
+ }));
+ }
+
+ // all other cases are a no-op
+ return .none;
+ }
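A sketch of the opcode selection above, using a few assumed source and destination types:

    // u64 -> u32: i32.wrap_i64
    // i32 -> i64: i64.extend_i32_s (source is signed)
    // u32 -> u64: i64.extend_i32_u (source is unsigned)
    // u16 -> u32: no instruction; both are already i32 at the wasm level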
+
+ fn airIsNull(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = self.resolveInst(un_op);
+ const writer = self.code.writer();
+
+ // load the null-tag, which lives at the multi_value's first index
+ try self.emitWValue(.{ .local = operand.multi_value.index });
+ // Compare the null-tag with '0'
+ try writer.writeByte(wasm.opcode(.i32_const));
+ try leb.writeILEB128(writer, @as(i32, 0));
+
+ try writer.writeByte(@enumToInt(opcode));
+
+ // we save the result in a new local
+ const local = try self.allocLocal(Type.initTag(.i32));
+ try writer.writeByte(wasm.opcode(.local_set));
+ try leb.writeULEB128(writer, local.local);
+
+ return local;
+ }
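For reference, how the call sites earlier in this file map AIR instructions onto this helper; this restates the existing dispatch rather than adding anything new:

    // .is_null / .is_null_ptr         -> i32.ne (true when the null-tag != 0, i.e. null)
    // .is_non_null / .is_non_null_ptr -> i32.eq (true when the null-tag == 0, i.e. has a payload)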
+
+ fn airOptionalPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = self.resolveInst(ty_op.operand);
+ return WValue{ .local = operand.multi_value.index + 1 };
+ }
+
+ fn airWrapOptional(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ return self.resolveInst(ty_op.operand);
+ }
};