Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig               68
-rw-r--r--  src/codegen/llvm.zig           694
-rw-r--r--  src/codegen/llvm/bindings.zig   30
-rw-r--r--  src/codegen/spirv.zig            1
-rw-r--r--  src/codegen/wasm.zig            12
5 files changed, 649 insertions, 156 deletions
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index ad98dc87c1..dd71590566 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -976,7 +976,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.mul => try airBinOp (f, inst, " * "),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
- .div => try airBinOp( f, inst, " / "),
+ .div_float, .div_exact, .div_trunc => try airBinOp( f, inst, " / "),
+ .div_floor => try airBinOp( f, inst, " divfloor "),
.rem => try airBinOp( f, inst, " % "),
.mod => try airBinOp( f, inst, " mod "), // TODO implement modulus division
@@ -992,6 +993,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.min => try airMinMax(f, inst, "<"),
.max => try airMinMax(f, inst, ">"),
+ .slice => try airSlice(f, inst),
+
.cmp_eq => try airBinOp(f, inst, " == "),
.cmp_gt => try airBinOp(f, inst, " > "),
.cmp_gte => try airBinOp(f, inst, " >= "),
@@ -1075,11 +1078,13 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.slice_ptr => try airSliceField(f, inst, ".ptr;\n"),
.slice_len => try airSliceField(f, inst, ".len;\n"),
+ .ptr_slice_len_ptr => try airPtrSliceFieldPtr(f, inst, ".len;\n"),
+ .ptr_slice_ptr_ptr => try airPtrSliceFieldPtr(f, inst, ".ptr;\n"),
+
.ptr_elem_val => try airPtrElemVal(f, inst, "["),
- .ptr_ptr_elem_val => try airPtrElemVal(f, inst, "[0]["),
.ptr_elem_ptr => try airPtrElemPtr(f, inst),
.slice_elem_val => try airSliceElemVal(f, inst, "["),
- .ptr_slice_elem_val => try airSliceElemVal(f, inst, "[0]["),
+ .slice_elem_ptr => try airSliceElemPtr(f, inst),
.array_elem_val => try airArrayElemVal(f, inst),
.unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst),
@@ -1101,8 +1106,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
}
fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue {
- if (f.liveness.isUnused(inst))
- return CValue.none;
+ if (f.liveness.isUnused(inst)) return CValue.none;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -1114,6 +1118,21 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue
return local;
}
+fn airPtrSliceFieldPtr(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
+ const writer = f.object.writer();
+
+ _ = writer;
+ _ = operand;
+ _ = suffix;
+
+ return f.fail("TODO: C backend: airPtrSliceFieldPtr", .{});
+}
+
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CValue {
const is_volatile = false; // TODO
if (!is_volatile and f.liveness.isUnused(inst))
@@ -1148,6 +1167,24 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CVal
return local;
}
+fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ const slice = try f.resolveInst(bin_op.lhs);
+ const index = try f.resolveInst(bin_op.rhs);
+ const writer = f.object.writer();
+ const local = try f.allocLocal(f.air.typeOfIndex(inst), .Const);
+ try writer.writeAll(" = &");
+ try f.writeCValue(writer, slice);
+ try writer.writeByte('[');
+ try f.writeCValue(writer, index);
+ try writer.writeAll("];\n");
+ return local;
+}
+
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
if (f.liveness.isUnused(inst)) return CValue.none;
@@ -1623,6 +1660,27 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8) !CValu
return local;
}
+fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
+
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr = try f.resolveInst(bin_op.lhs);
+ const len = try f.resolveInst(bin_op.rhs);
+
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+
+ try writer.writeAll(" = {");
+ try f.writeCValue(writer, ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, len);
+ try writer.writeAll("};\n");
+
+ return local;
+}
+
fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
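
For context, the new AIR `slice` instruction pairs a many-pointer with a length; airSlice above lowers it to a `{ptr, len}` struct initializer. A minimal runnable Zig sketch (separate from the patch) of the semantics being lowered:

const std = @import("std");

test "slice construction from pointer and length" {
    const buf = [_]u8{ 'a', 'b', 'c', 'd' };
    const ptr: [*]const u8 = &buf;
    const len: usize = 2;
    // This ptr+len pairing is what the AIR `slice` instruction represents;
    // the C backend emits it as `= { ptr, len };`.
    const s: []const u8 = ptr[0..len];
    try std.testing.expectEqualSlices(u8, "ab", s);
}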
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 681b3c36db..80625928cb 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -527,19 +527,11 @@ pub const Object = struct {
if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
alias.setAliasee(llvm_global);
} else {
- const alias = self.llvm_module.addAlias(llvm_global.typeOf(), llvm_global, exp_name_z);
- switch (exp.options.linkage) {
- .Internal => alias.setLinkage(.Internal),
- .Strong => alias.setLinkage(.External),
- .Weak => {
- if (is_extern) {
- alias.setLinkage(.ExternalWeak);
- } else {
- alias.setLinkage(.WeakODR);
- }
- },
- .LinkOnce => alias.setLinkage(.LinkOnceODR),
- }
+ _ = self.llvm_module.addAlias(
+ llvm_global.typeOf(),
+ llvm_global,
+ exp_name_z,
+ );
}
}
} else {
@@ -589,7 +581,9 @@ pub const DeclGen = struct {
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
_ = try self.resolveLlvmFunction(extern_fn.data);
} else {
+ const target = self.module.getTarget();
const global = try self.resolveGlobalDecl(decl);
+ global.setAlignment(decl.getAlignment(target));
assert(decl.has_tv);
const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
const variable = payload.data;
@@ -1090,6 +1084,37 @@ pub const DeclGen = struct {
const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False);
return llvm_int.constIntToPtr(try self.llvmType(tv.ty));
},
+ .field_ptr => {
+ const field_ptr = tv.val.castTag(.field_ptr).?.data;
+ const parent_ptr = try self.lowerParentPtr(field_ptr.container_ptr);
+ const llvm_u32 = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(field_ptr.field_index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .elem_ptr => {
+ const elem_ptr = tv.val.castTag(.elem_ptr).?.data;
+ const parent_ptr = try self.lowerParentPtr(elem_ptr.array_ptr);
+ const llvm_usize = try self.llvmType(Type.usize);
+ if (parent_ptr.typeOf().getElementType().getTypeKind() == .Array) {
+ const indices: [2]*const llvm.Value = .{
+ llvm_usize.constInt(0, .False),
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ } else {
+ const indices: [1]*const llvm.Value = .{
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ }
+ },
+ .null_value, .zero => {
+ const llvm_type = try self.llvmType(tv.ty);
+ return llvm_type.constNull();
+ },
else => |tag| return self.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
},
.Array => switch (tv.val.tag()) {
@@ -1254,6 +1279,10 @@ pub const DeclGen = struct {
}
const field_ty = tv.ty.unionFieldType(tag_and_val.tag);
const payload = p: {
+ if (!field_ty.hasCodeGenBits()) {
+ const padding_len = @intCast(c_uint, layout.payload_size);
+ break :p self.context.intType(8).arrayType(padding_len).getUndef();
+ }
const field = try genTypedValue(self, .{ .ty = field_ty, .val = tag_and_val.val });
const field_size = field_ty.abiSize(target);
if (field_size == layout.payload_size) {
@@ -1284,6 +1313,66 @@ pub const DeclGen = struct {
}
return llvm_union_ty.constNamedStruct(&fields, fields.len);
},
+ .Vector => switch (tv.val.tag()) {
+ .bytes => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const bytes = tv.val.castTag(.bytes).?.data;
+ const vector_len = tv.ty.arrayLen();
+ assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
+
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ var byte_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = bytes[i],
+ };
+
+ elem.* = try self.genTypedValue(.{
+ .ty = elem_ty,
+ .val = Value.initPayload(&byte_payload.base),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .array => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ // The value includes the sentinel in those cases.
+ const elem_vals = tv.val.castTag(.array).?.data;
+ const vector_len = tv.ty.arrayLen();
+ assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .repeated => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const val = tv.val.castTag(.repeated).?.data;
+ const elem_ty = tv.ty.elemType();
+ const len = tv.ty.arrayLen();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem| {
+ elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ else => unreachable,
+ },
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
@@ -1298,11 +1387,73 @@ pub const DeclGen = struct {
.Frame,
.AnyFrame,
- .Vector,
=> return self.todo("implement const of type '{}'", .{tv.ty}),
}
}
+ const ParentPtr = struct {
+ ty: Type,
+ llvm_ptr: *const llvm.Value,
+ };
+
+ fn lowerParentPtrDecl(
+ dg: *DeclGen,
+ ptr_val: Value,
+ decl: *Module.Decl,
+ ) Error!ParentPtr {
+ decl.alive = true;
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = decl.ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl);
+ return ParentPtr{
+ .llvm_ptr = llvm_ptr,
+ .ty = decl.ty,
+ };
+ }
+
+ fn lowerParentPtr(dg: *DeclGen, ptr_val: Value) Error!*const llvm.Value {
+ switch (ptr_val.tag()) {
+ .decl_ref_mut => {
+ const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .decl_ref => {
+ const decl = ptr_val.castTag(.decl_ref).?.data;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .variable => {
+ const decl = ptr_val.castTag(.variable).?.data.owner_decl;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ const parent_ptr = try dg.lowerParentPtr(field_ptr.container_ptr);
+ const llvm_u32 = dg.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(field_ptr.field_index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ const parent_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr);
+ const llvm_usize = try dg.llvmType(Type.usize);
+ const indices: [2]*const llvm.Value = .{
+ llvm_usize.constInt(0, .False),
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .opt_payload_ptr => return dg.todo("implement lowerParentPtr for optional payload", .{}),
+ .eu_payload_ptr => return dg.todo("implement lowerParentPtr for error union payload", .{}),
+ else => unreachable,
+ }
+ }
+
fn lowerDeclRefValue(
self: *DeclGen,
tv: TypedValue,
@@ -1328,12 +1479,13 @@ pub const DeclGen = struct {
return self.context.constStruct(&fields, fields.len, .False);
}
- decl.alive = true;
const llvm_type = try self.llvmType(tv.ty);
if (!tv.ty.childType().hasCodeGenBits()) {
return self.lowerPtrToVoid(tv.ty);
}
+ decl.alive = true;
+
const llvm_val = if (decl.ty.zigTypeTag() == .Fn)
try self.resolveLlvmFunction(decl)
else
@@ -1523,6 +1675,7 @@ pub const FuncGen = struct {
global.setInitializer(llvm_val);
global.setLinkage(.Private);
global.setGlobalConstant(.True);
+ global.setUnnamedAddr(.True);
global.setAlignment(ty.abiAlignment(target));
gop.value_ptr.* = global;
return global;
@@ -1542,7 +1695,10 @@ pub const FuncGen = struct {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
- .div => try self.airDiv(inst),
+ .div_float => try self.airDivFloat(inst),
+ .div_trunc => try self.airDivTrunc(inst),
+ .div_floor => try self.airDivFloor(inst),
+ .div_exact => try self.airDivExact(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.ptr_add => try self.airPtrAdd(inst),
@@ -1552,6 +1708,7 @@ pub const FuncGen = struct {
.shl_exact => try self.airShlExact(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
+ .slice => try self.airSlice(inst),
.bit_and, .bool_and => try self.airAnd(inst),
.bit_or, .bool_or => try self.airOr(inst),
@@ -1600,6 +1757,10 @@ pub const FuncGen = struct {
.assembly => try self.airAssembly(inst),
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
+
+ .ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0),
+ .ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1),
+
.array_to_slice => try self.airArrayToSlice(inst),
.float_to_int => try self.airFloatToInt(inst),
.int_to_float => try self.airIntToFloat(inst),
@@ -1630,13 +1791,12 @@ pub const FuncGen = struct {
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
- .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
- .optional_payload => try self.airOptionalPayload(inst, false),
- .optional_payload_ptr => try self.airOptionalPayload(inst, true),
+ .optional_payload => try self.airOptionalPayload(inst),
+ .optional_payload_ptr => try self.airOptionalPayloadPtr(inst),
.unwrap_errunion_payload => try self.airErrUnionPayload(inst, false),
.unwrap_errunion_payload_ptr => try self.airErrUnionPayload(inst, true),
@@ -1829,14 +1989,16 @@ pub const FuncGen = struct {
const raw_llvm_ty = try self.dg.llvmType(inst_ty);
- // If the zig tag type is a function, this represents an actual function body; not
- // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
- // of function pointers, however the phi makes it a runtime value and therefore
- // the LLVM type has to be wrapped in a pointer.
- const llvm_ty = if (inst_ty.zigTypeTag() == .Fn)
- raw_llvm_ty.pointerType(0)
- else
- raw_llvm_ty;
+ const llvm_ty = ty: {
+ // If the zig tag type is a function, this represents an actual function body; not
+ // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
+ // of function pointers, however the phi makes it a runtime value and therefore
+ // the LLVM type has to be wrapped in a pointer.
+ if (inst_ty.zigTypeTag() == .Fn or isByRef(inst_ty)) {
+ break :ty raw_llvm_ty.pointerType(0);
+ }
+ break :ty raw_llvm_ty;
+ };
const phi_node = self.builder.buildPhi(llvm_ty, "");
phi_node.addIncoming(
@@ -1874,23 +2036,55 @@ pub const FuncGen = struct {
const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
- {
- const prev_block = self.builder.getInsertBlock();
- defer self.builder.positionBuilderAtEnd(prev_block);
+ _ = self.builder.buildCondBr(cond, then_block, else_block);
- self.builder.positionBuilderAtEnd(then_block);
- try self.genBody(then_body);
+ self.builder.positionBuilderAtEnd(then_block);
+ try self.genBody(then_body);
- self.builder.positionBuilderAtEnd(else_block);
- try self.genBody(else_body);
- }
- _ = self.builder.buildCondBr(cond, then_block, else_block);
+ self.builder.positionBuilderAtEnd(else_block);
+ try self.genBody(else_body);
+
+ // No need to reset the insert cursor since this instruction is noreturn.
return null;
}
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- _ = inst;
- return self.todo("implement llvm codegen for switch_br", .{});
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const cond = try self.resolveInst(pl_op.operand);
+ const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
+ const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
+ const llvm_switch = self.builder.buildSwitch(cond, else_block, switch_br.data.cases_len);
+
+ var extra_index: usize = switch_br.end;
+ var case_i: u32 = 0;
+
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + case.data.items_len + case_body.len;
+
+ const case_block = self.context.appendBasicBlock(self.llvm_func, "Case");
+
+ for (items) |item| {
+ const llvm_item = try self.resolveInst(item);
+ llvm_switch.addCase(llvm_item, case_block);
+ }
+
+ self.builder.positionBuilderAtEnd(case_block);
+ try self.genBody(case_body);
+ }
+
+ self.builder.positionBuilderAtEnd(else_block);
+ const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
+ if (else_body.len != 0) {
+ try self.genBody(else_body);
+ } else {
+ _ = self.builder.buildUnreachable();
+ }
+
+ // No need to reset the insert cursor since this instruction is noreturn.
+ return null;
}
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1972,14 +2166,22 @@ pub const FuncGen = struct {
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
return self.builder.buildExtractValue(operand, index, "");
}
+ fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const slice_ptr = try self.resolveInst(ty_op.operand);
+
+ return self.builder.buildStructGEP(slice_ptr, index, "");
+ }
+
fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -1987,28 +2189,18 @@ pub const FuncGen = struct {
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const base_ptr = self.builder.buildExtractValue(slice, 0, "");
- const indices: [1]*const llvm.Value = .{index};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr = self.sliceElemPtr(slice, index);
return self.load(ptr, slice_ty);
}
- fn airPtrSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = self.air.typeOf(bin_op.lhs).childType();
- if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
-
- const base_ptr = ptr: {
- const ptr_field_ptr = self.builder.buildStructGEP(lhs, 0, "");
- break :ptr self.builder.buildLoad(ptr_field_ptr, "");
- };
+ fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const indices: [1]*const llvm.Value = .{rhs};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
- return self.load(ptr, slice_ty);
+ const slice = try self.resolveInst(bin_op.lhs);
+ const index = try self.resolveInst(bin_op.rhs);
+ return self.sliceElemPtr(slice, index);
}
fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2068,19 +2260,6 @@ pub const FuncGen = struct {
}
}
- fn airPtrPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = self.air.typeOf(bin_op.lhs).childType();
- if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
- const base_ptr = self.builder.buildLoad(lhs, "");
- const indices: [1]*const llvm.Value = .{rhs};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
- return self.load(ptr, ptr_ty);
- }
-
fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -2112,17 +2291,34 @@ pub const FuncGen = struct {
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.air.typeOf(struct_field.struct_operand);
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
- const field_index = llvmFieldIndex(struct_ty, struct_field.field_index);
- if (isByRef(struct_ty)) {
- const field_ptr = self.builder.buildStructGEP(struct_llvm_val, field_index, "");
- const field_ty = struct_ty.structFieldType(struct_field.field_index);
- if (isByRef(field_ty)) {
- return field_ptr;
- } else {
- return self.builder.buildLoad(field_ptr, "");
- }
+ const field_index = struct_field.field_index;
+ const field_ty = struct_ty.structFieldType(field_index);
+ if (!field_ty.hasCodeGenBits()) {
+ return null;
+ }
+
+ assert(isByRef(struct_ty));
+
+ const field_ptr = switch (struct_ty.zigTypeTag()) {
+ .Struct => blk: {
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index);
+ break :blk self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
+ },
+ .Union => blk: {
+ const llvm_field_ty = try self.dg.llvmType(field_ty);
+ const target = self.dg.module.getTarget();
+ const layout = struct_ty.unionGetLayout(target);
+ const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
+ const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, "");
+ break :blk self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), "");
+ },
+ else => unreachable,
+ };
+
+ if (isByRef(field_ty)) {
+ return field_ptr;
} else {
- return self.builder.buildExtractValue(struct_llvm_val, field_index, "");
+ return self.builder.buildLoad(field_ptr, "");
}
}
@@ -2154,17 +2350,19 @@ pub const FuncGen = struct {
const air_asm = self.air.extraData(Air.Asm, ty_pl.payload);
const zir = self.dg.decl.getFileScope().zir;
const extended = zir.instructions.items(.data)[air_asm.data.zir_index].extended;
- const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
- const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
- const outputs_len = @truncate(u5, extended.small);
- const args_len = @truncate(u5, extended.small >> 5);
- const clobbers_len = @truncate(u5, extended.small >> 10);
const is_volatile = @truncate(u1, extended.small >> 15) != 0;
- const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end..][0..outputs_len]);
- const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end + outputs.len ..][0..args_len]);
+ if (!is_volatile and self.liveness.isUnused(inst)) {
+ return null;
+ }
+ const outputs_len = @truncate(u5, extended.small);
if (outputs_len > 1) {
return self.todo("implement llvm codegen for asm with more than 1 output", .{});
}
+ const args_len = @truncate(u5, extended.small >> 5);
+ const clobbers_len = @truncate(u5, extended.small >> 10);
+ const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
+ const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
+ const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end..][0..args_len]);
var extra_i: usize = zir_extra.end;
const output_constraint: ?[]const u8 = out: {
@@ -2177,10 +2375,6 @@ pub const FuncGen = struct {
break :out null;
};
- if (!is_volatile and self.liveness.isUnused(inst)) {
- return null;
- }
-
var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
defer llvm_constraints.deinit(self.gpa);
@@ -2188,7 +2382,7 @@ pub const FuncGen = struct {
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
- const llvm_params_len = args.len + @boolToInt(output_constraint != null);
+ const llvm_params_len = args.len;
const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len);
const llvm_param_values = try arena.alloc(*const llvm.Value, llvm_params_len);
@@ -2200,7 +2394,8 @@ pub const FuncGen = struct {
if (total_i != 0) {
llvm_constraints.appendAssumeCapacity(',');
}
- llvm_constraints.appendSliceAssumeCapacity(constraint);
+ llvm_constraints.appendAssumeCapacity('=');
+ llvm_constraints.appendSliceAssumeCapacity(constraint[1..]);
total_i += 1;
}
@@ -2326,8 +2521,7 @@ pub const FuncGen = struct {
op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
@@ -2341,7 +2535,7 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- if (operand_is_ptr) {
+ if (operand_is_ptr or isByRef(err_union_ty)) {
const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
const loaded = self.builder.buildLoad(err_field_ptr, "");
return self.builder.buildICmp(op, loaded, zero, "");
@@ -2351,35 +2545,55 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- fn airOptionalPayload(
- self: *FuncGen,
- inst: Air.Inst.Index,
- operand_is_ptr: bool,
- ) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
+ const optional_ty = self.air.typeOf(ty_op.operand).childType();
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = optional_ty.optionalChild(&buf);
+ if (!payload_ty.hasCodeGenBits()) {
+ // We have a pointer to a zero-bit value and we need to return
+ // a pointer to a zero-bit value.
+ return operand;
+ }
+ if (optional_ty.isPtrLikeOptional()) {
+ // The payload and the optional are the same value.
+ return operand;
+ }
+ const index_type = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ index_type.constNull(), // dereference the pointer
+ index_type.constNull(), // first field is the payload
+ };
+ return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
+ }
- if (operand_is_ptr) {
- const operand_ty = self.air.typeOf(ty_op.operand).elemType();
- if (operand_ty.isPtrLikeOptional()) {
- return self.builder.buildLoad(operand, "");
- }
+ fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
- const index_type = self.context.intType(32);
- var indices: [2]*const llvm.Value = .{
- index_type.constNull(), index_type.constNull(),
- };
- return self.builder.buildInBoundsGEP(operand, &indices, 2, "");
- }
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const optional_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.air.typeOfIndex(inst);
+ if (!payload_ty.hasCodeGenBits()) return null;
- const operand_ty = self.air.typeOf(ty_op.operand);
- if (operand_ty.isPtrLikeOptional()) {
+ if (optional_ty.isPtrLikeOptional()) {
+ // Payload value is the same as the optional value.
return operand;
}
+ if (isByRef(payload_ty)) {
+ // We have a pointer and we need to return a pointer to the first field.
+ const index_type = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ index_type.constNull(), // dereference the pointer
+ index_type.constNull(), // first field is the payload
+ };
+ return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
+ }
+
return self.builder.buildExtractValue(operand, 0, "");
}
@@ -2388,22 +2602,16 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
-
- if (!payload_ty.hasCodeGenBits()) {
- return null;
- }
-
- if (operand_is_ptr) {
+ if (!payload_ty.hasCodeGenBits()) return null;
+ if (operand_is_ptr or isByRef(payload_ty)) {
return self.builder.buildStructGEP(operand, 1, "");
}
-
return self.builder.buildExtractValue(operand, 1, "");
}
@@ -2425,7 +2633,7 @@ pub const FuncGen = struct {
return self.builder.buildLoad(operand, "");
}
- if (operand_is_ptr) {
+ if (operand_is_ptr or isByRef(payload_ty)) {
const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
return self.builder.buildLoad(err_field_ptr, "");
}
@@ -2437,9 +2645,9 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(1).constAllOnes();
- if (!operand_ty.hasCodeGenBits()) return non_null_bit;
+ if (!payload_ty.hasCodeGenBits()) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
if (optional_ty.isPtrLikeOptional()) return operand;
@@ -2447,8 +2655,6 @@ pub const FuncGen = struct {
if (isByRef(optional_ty)) {
const optional_ptr = self.buildAlloca(llvm_optional_ty);
const payload_ptr = self.builder.buildStructGEP(optional_ptr, 0, "");
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = operand_ty.optionalChild(&buf);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = payload_ty,
@@ -2494,10 +2700,35 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const err_un_ty = self.air.typeOfIndex(inst);
+ const payload_ty = err_un_ty.errorUnionPayload();
+ const operand = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasCodeGenBits()) {
+ return operand;
+ }
+ const err_un_llvm_ty = try self.dg.llvmType(err_un_ty);
+ if (isByRef(err_un_ty)) {
+ const result_ptr = self.buildAlloca(err_un_llvm_ty);
+ const err_ptr = self.builder.buildStructGEP(result_ptr, 0, "");
+ _ = self.builder.buildStore(operand, err_ptr);
+ const payload_ptr = self.builder.buildStructGEP(result_ptr, 1, "");
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = payload_ty,
+ };
+ const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ // TODO store undef to payload_ptr
+ _ = payload_ptr;
+ _ = payload_ptr_ty;
+ return result_ptr;
+ }
- return self.todo("implement llvm codegen for 'airWrapErrUnionErr'", .{});
+ const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, 0, "");
+ // TODO set payload bytes to undef
+ return partial;
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2526,6 +2757,20 @@ pub const FuncGen = struct {
return self.builder.buildUMax(lhs, rhs, "");
}
+ fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr = try self.resolveInst(bin_op.lhs);
+ const len = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+ const llvm_slice_ty = try self.dg.llvmType(inst_ty);
+
+ const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, "");
+ return self.builder.buildInsertValue(partial, len, 1, "");
+ }
+
fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -2635,20 +2880,76 @@ pub const FuncGen = struct {
return self.builder.buildUMulFixSat(lhs, rhs, "");
}
- fn airDiv(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+
+ return self.builder.buildFDiv(lhs, rhs, "");
+ }
+
+ fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isRuntimeFloat()) {
+ const result = self.builder.buildFDiv(lhs, rhs, "");
+ return self.callTrunc(result, inst_ty);
+ }
if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
return self.builder.buildUDiv(lhs, rhs, "");
}
+ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isRuntimeFloat()) {
+ const result = self.builder.buildFDiv(lhs, rhs, "");
+ return try self.callFloor(result, inst_ty);
+ }
+ if (inst_ty.isSignedInt()) {
+ // const d = @divTrunc(a, b);
+ // const r = @rem(a, b);
+ // return if (r == 0) d else d - ((a < 0) ^ (b < 0));
+ const result_llvm_ty = try self.dg.llvmType(inst_ty);
+ const zero = result_llvm_ty.constNull();
+ const div_trunc = self.builder.buildSDiv(lhs, rhs, "");
+ const rem = self.builder.buildSRem(lhs, rhs, "");
+ const rem_eq_0 = self.builder.buildICmp(.EQ, rem, zero, "");
+ const a_lt_0 = self.builder.buildICmp(.SLT, lhs, zero, "");
+ const b_lt_0 = self.builder.buildICmp(.SLT, rhs, zero, "");
+ const a_b_xor = self.builder.buildXor(a_lt_0, b_lt_0, "");
+ const a_b_xor_ext = self.builder.buildZExt(a_b_xor, div_trunc.typeOf(), "");
+ const d_sub_xor = self.builder.buildSub(div_trunc, a_b_xor_ext, "");
+ return self.builder.buildSelect(rem_eq_0, div_trunc, d_sub_xor, "");
+ }
+ return self.builder.buildUDiv(lhs, rhs, "");
+ }
+
+ fn airDivExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
+ return self.builder.buildExactUDiv(lhs, rhs, "");
+ }
+
fn airRem(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -2691,26 +2992,42 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
- const indices: [1]*const llvm.Value = .{offset};
- return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ if (ptr_ty.ptrSize() == .One) {
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ const indices: [2]*const llvm.Value = .{
+ self.context.intType(32).constNull(), offset,
+ };
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ } else {
+ const indices: [1]*const llvm.Value = .{offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = self.builder.buildNeg(offset, "");
- const indices: [1]*const llvm.Value = .{negative_offset};
- return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ if (ptr_ty.ptrSize() == .One) {
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ const indices: [2]*const llvm.Value = .{
+ self.context.intType(32).constNull(), negative_offset,
+ };
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ } else {
+ const indices: [1]*const llvm.Value = .{negative_offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
}
fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2883,8 +3200,9 @@ pub const FuncGen = struct {
const inst_ty = self.air.typeOfIndex(inst);
const llvm_dest_ty = try self.dg.llvmType(inst_ty);
- // TODO look into pulling this logic out into a different AIR instruction than bitcast
- if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
+ if (operand_ty.zigTypeTag() == .Int and inst_ty.zigTypeTag() == .Pointer) {
+ return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
+ } else if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
const target = self.dg.module.getTarget();
const elem_ty = operand_ty.childType();
if (!isByRef(inst_ty)) {
@@ -2914,6 +3232,43 @@ pub const FuncGen = struct {
}
}
return array_ptr;
+ } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
+ const target = self.dg.module.getTarget();
+ const elem_ty = operand_ty.childType();
+ const llvm_vector_ty = try self.dg.llvmType(inst_ty);
+ if (!isByRef(operand_ty)) {
+ return self.dg.todo("implement bitcast non-ref array to vector", .{});
+ }
+
+ const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+ if (bitcast_ok) {
+ const llvm_vector_ptr_ty = llvm_vector_ty.pointerType(0);
+ const casted_ptr = self.builder.buildBitCast(operand, llvm_vector_ptr_ty, "");
+ const vector = self.builder.buildLoad(casted_ptr, "");
+ // The array is aligned to the element's alignment, while the vector might have a completely
+ // different alignment. This means we need to enforce the alignment of this load.
+ vector.setAlignment(elem_ty.abiAlignment(target));
+ return vector;
+ } else {
+ // If the element's bit size does not match its ABI size in bits,
+ // a simple bitcast will not work, and we fall back to extractelement.
+ const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_u32 = self.context.intType(32);
+ const zero = llvm_usize.constNull();
+ const vector_len = operand_ty.arrayLen();
+ var vector = llvm_vector_ty.getUndef();
+ var i: u64 = 0;
+ while (i < vector_len) : (i += 1) {
+ const index_usize = llvm_usize.constInt(i, .False);
+ const index_u32 = llvm_u32.constInt(i, .False);
+ const indexes: [2]*const llvm.Value = .{ zero, index_usize };
+ const elem_ptr = self.builder.buildInBoundsGEP(operand, &indexes, indexes.len, "");
+ const elem = self.builder.buildLoad(elem_ptr, "");
+ vector = self.builder.buildInsertElement(vector, elem, index_u32, "");
+ }
+
+ return vector;
+ }
}
return self.builder.buildBitCast(operand, llvm_dest_ty, "");
@@ -3298,6 +3653,37 @@ pub const FuncGen = struct {
}
}
+ fn callFloor(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "floor");
+ }
+
+ fn callCeil(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "ceil");
+ }
+
+ fn callTrunc(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "trunc");
+ }
+
+ fn callFloatUnary(self: *FuncGen, arg: *const llvm.Value, ty: Type, name: []const u8) !*const llvm.Value {
+ const target = self.dg.module.getTarget();
+
+ var fn_name_buf: [100]u8 = undefined;
+ const llvm_fn_name = std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.f{d}", .{
+ name, ty.floatBits(target),
+ }) catch unreachable;
+
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const operand_llvm_ty = try self.dg.llvmType(ty);
+ const param_types = [_]*const llvm.Type{operand_llvm_ty};
+ const fn_type = llvm.functionType(operand_llvm_ty, &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+
+ const args: [1]*const llvm.Value = .{arg};
+ return self.builder.buildCall(llvm_fn, &args, args.len, .C, .Auto, "");
+ }
+
fn fieldPtr(
self: *FuncGen,
inst: Air.Inst.Index,
@@ -3336,6 +3722,16 @@ pub const FuncGen = struct {
return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
}
+ fn sliceElemPtr(
+ self: *FuncGen,
+ slice: *const llvm.Value,
+ index: *const llvm.Value,
+ ) *const llvm.Value {
+ const base_ptr = self.builder.buildExtractValue(slice, 0, "");
+ const indices: [1]*const llvm.Value = .{index};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
+
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
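
The signed div_floor lowering above is branchless: compute the truncated quotient, then subtract one when the remainder is nonzero and the operand signs differ. The same identity as a runnable Zig sketch (separate from the patch):

const std = @import("std");

fn divFloor(a: i32, b: i32) i32 {
    const d = @divTrunc(a, b);
    const r = @rem(a, b);
    // Mirrors the LLVM select: if (r == 0) d else d - ((a < 0) ^ (b < 0)).
    if (r == 0) return d;
    return d - @boolToInt((a < 0) != (b < 0));
}

test "floor division identity" {
    try std.testing.expectEqual(@as(i32, -3), divFloor(-7, 3));
    try std.testing.expectEqual(@as(i32, 2), divFloor(7, 3));
    try std.testing.expectEqual(@divFloor(@as(i32, -7), 3), divFloor(-7, 3));
}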
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index ab4cf97350..43aca87532 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -181,6 +181,9 @@ pub const Value = opaque {
pub const setInitializer = LLVMSetInitializer;
extern fn LLVMSetInitializer(GlobalVar: *const Value, ConstantVal: *const Value) void;
+
+ pub const addCase = LLVMAddCase;
+ extern fn LLVMAddCase(Switch: *const Value, OnVal: *const Value, Dest: *const BasicBlock) void;
};
pub const Type = opaque {
@@ -234,6 +237,9 @@ pub const Type = opaque {
pub const getTypeKind = LLVMGetTypeKind;
extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind;
+
+ pub const getElementType = LLVMGetElementType;
+ extern fn LLVMGetElementType(Ty: *const Type) *const Type;
};
pub const Module = opaque {
@@ -316,6 +322,12 @@ pub const VerifierFailureAction = enum(c_int) {
pub const constNeg = LLVMConstNeg;
extern fn LLVMConstNeg(ConstantVal: *const Value) *const Value;
+pub const constVector = LLVMConstVector;
+extern fn LLVMConstVector(
+ ScalarConstantVals: [*]*const Value,
+ Size: c_uint,
+) *const Value;
+
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
@@ -551,6 +563,9 @@ pub const Builder = opaque {
pub const buildCondBr = LLVMBuildCondBr;
extern fn LLVMBuildCondBr(*const Builder, If: *const Value, Then: *const BasicBlock, Else: *const BasicBlock) *const Value;
+ pub const buildSwitch = LLVMBuildSwitch;
+ extern fn LLVMBuildSwitch(*const Builder, V: *const Value, Else: *const BasicBlock, NumCases: c_uint) *const Value;
+
pub const buildPhi = LLVMBuildPhi;
extern fn LLVMBuildPhi(*const Builder, Ty: *const Type, Name: [*:0]const u8) *const Value;
@@ -570,6 +585,15 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
+ pub const buildInsertElement = LLVMBuildInsertElement;
+ extern fn LLVMBuildInsertElement(
+ *const Builder,
+ VecVal: *const Value,
+ EltVal: *const Value,
+ Index: *const Value,
+ Name: [*:0]const u8,
+ ) *const Value;
+
pub const buildPtrToInt = LLVMBuildPtrToInt;
extern fn LLVMBuildPtrToInt(
*const Builder,
@@ -735,6 +759,12 @@ pub const Builder = opaque {
pub const buildSMin = ZigLLVMBuildSMin;
extern fn ZigLLVMBuildSMin(builder: *const Builder, LHS: *const Value, RHS: *const Value, name: [*:0]const u8) *const Value;
+
+ pub const buildExactUDiv = LLVMBuildExactUDiv;
+ extern fn LLVMBuildExactUDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildExactSDiv = LLVMBuildExactSDiv;
+ extern fn LLVMBuildExactSDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
};
pub const IntPredicate = enum(c_uint) {
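
The new constVector and buildInsertElement bindings back the Vector constant lowering and the array/vector bitcast paths added in llvm.zig above. A runnable Zig sketch (separate from the patch) of the language semantics they serve:

const std = @import("std");

test "vector constants and array coercion" {
    // A comptime-known vector like this lowers through LLVMConstVector;
    // the array<->vector bitcast path in llvm.zig handles the coercion.
    const v: @Vector(4, u8) = [4]u8{ 1, 2, 3, 4 };
    const arr: [4]u8 = v;
    try std.testing.expectEqual(@as(u8, 3), arr[2]);
}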
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 25a1d228e0..da2fa66fee 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -669,7 +669,6 @@ pub const DeclGen = struct {
.add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
.sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
.mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
- .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),
.bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
.bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index 6902553257..75e6a1d78e 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -822,7 +822,7 @@ pub const Context = struct {
.subwrap => self.airWrapBinOp(inst, .sub),
.mul => self.airBinOp(inst, .mul),
.mulwrap => self.airWrapBinOp(inst, .mul),
- .div => self.airBinOp(inst, .div),
+ .div_trunc => self.airBinOp(inst, .div),
.bit_and => self.airBinOp(inst, .@"and"),
.bit_or => self.airBinOp(inst, .@"or"),
.bool_and => self.airBinOp(inst, .@"and"),
@@ -866,6 +866,7 @@ pub const Context = struct {
.struct_field_ptr_index_1 => self.airStructFieldPtrIndex(inst, 1),
.struct_field_ptr_index_2 => self.airStructFieldPtrIndex(inst, 2),
.struct_field_ptr_index_3 => self.airStructFieldPtrIndex(inst, 3),
+ .struct_field_val => self.airStructFieldVal(inst),
.switch_br => self.airSwitchBr(inst),
.unreach => self.airUnreachable(inst),
.wrap_optional => self.airWrapOptional(inst),
@@ -1456,6 +1457,15 @@ pub const Context = struct {
return WValue{ .local = struct_ptr.multi_value.index + index };
}
+ fn airStructFieldVal(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ if (self.liveness.isUnused(inst)) return WValue.none;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
+ const struct_multivalue = self.resolveInst(extra.struct_operand).multi_value;
+ return WValue{ .local = struct_multivalue.index + extra.field_index };
+ }
+
fn airSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
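
In the wasm backend a struct value occupies a run of consecutive locals, so the new airStructFieldVal resolves a field access to a local-index offset. A simplified, hypothetical model of that scheme (names are illustrative, not from the backend):

const std = @import("std");

// Hypothetical model: a struct stored in locals [base, base + field_count)
// resolves field i to local base + i, matching `multi_value.index + field_index`.
fn fieldLocal(base: u32, field_index: u32) u32 {
    return base + field_index;
}

test "struct field maps onto a consecutive local" {
    try std.testing.expectEqual(@as(u32, 5), fieldLocal(3, 2));
}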