about summary refs log tree commit diff
path: root/src/codegen/llvm.zig
diff options
context:
space:
mode:
author Jakub Konka <kubkon@jakubkonka.com> 2022-04-29 11:24:30 +0200
committer Andrew Kelley <andrew@ziglang.org> 2022-05-05 22:29:30 -0700
commit edb3adaa33f943d0c64071fbf9d43aadadaa1e95 (patch)
tree 0f29acb927328d61104e940d7a1d1d63d8013bfa /src/codegen/llvm.zig
parent 413b789e06bfc98bd285f0a340bd537b1d2c9dec (diff)
download zig-edb3adaa33f943d0c64071fbf9d43aadadaa1e95.tar.gz
download zig-edb3adaa33f943d0c64071fbf9d43aadadaa1e95.zip
stage2,llvm: handle softfloats in @intToFloat and @floatToInt
If the hw doesn't have support for exotic floating-point types such as `f80`, we lower the call to a compiler-rt function call instead. I've added a behavior test specifically targeting this use case which now passes on `aarch64-macos`. Additionally, this commit makes it possible to successfully build stage3 on `aarch64-macos`. We can print the compiler's help message, however, building with it needs a little bit more love still.
Diffstat (limited to 'src/codegen/llvm.zig')
-rw-r--r-- src/codegen/llvm.zig | 163
1 file changed, 149 insertions(+), 14 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 426ef7c378..374ae9b38e 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -4229,41 +4229,166 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, len, 1, "");
}
+ inline fn isPowerOfTwo(bits: u64) bool {
+ return bits != 0 and ((bits & (~bits + 1)) == bits);
+ }
+
+ fn intTypeFromBitsAndSignRounded(self: *FuncGen, bits: u16, signed: bool) error{OutOfMemory}!Type {
+ const next_pow_two = math.log2_int_ceil(u16, bits);
+ const rounded_bits = @as(u32, 1) << next_pow_two;
+ return switch (rounded_bits) {
+ 8, 16, 32 => if (signed) Type.initTag(.i32) else Type.initTag(.u32),
+ 64 => if (signed) Type.initTag(.i64) else Type.initTag(.u64),
+ 128 => if (signed) Type.initTag(.i128) else Type.initTag(.u128),
+ else => |big| if (signed)
+ Type.Tag.int_signed.create(
+ self.dg.object.type_map_arena.allocator(),
+ @intCast(u16, big),
+ )
+ else
+ Type.Tag.int_unsigned.create(
+ self.dg.object.type_map_arena.allocator(),
+ @intCast(u16, big),
+ ),
+ };
+ }
+
fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
+ const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const operand_scalar_ty = operand_ty.scalarType();
+
+ var operand = try self.resolveInst(ty_op.operand);
+ var operand_ty = self.air.typeOf(ty_op.operand);
+ var operand_scalar_ty = operand_ty.scalarType();
+
+ {
+ const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
+ const is_signed = operand_scalar_ty.isSignedInt();
+
+ if (!isPowerOfTwo(operand_bits) or operand_bits < 32) {
+ const wider_ty = try self.intTypeFromBitsAndSignRounded(operand_bits, is_signed);
+ const wider_llvm_ty = try self.dg.llvmType(wider_ty);
+ if (is_signed) {
+ operand = self.builder.buildSExt(operand, wider_llvm_ty, "");
+ } else {
+ operand = self.builder.buildZExt(operand, wider_llvm_ty, "");
+ }
+ operand_ty = wider_ty;
+ operand_scalar_ty = operand_ty.scalarType();
+ }
+ }
+
const dest_ty = self.air.typeOfIndex(inst);
+ const dest_scalar_ty = dest_ty.scalarType();
const dest_llvm_ty = try self.dg.llvmType(dest_ty);
- if (operand_scalar_ty.isSignedInt()) {
- return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
- } else {
- return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
+ if (intrinsicsAllowed(dest_scalar_ty, target)) {
+ if (operand_scalar_ty.isSignedInt()) {
+ return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
+ } else {
+ return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
+ }
}
+
+ const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
+ const compiler_rt_operand_abbrev = compilerRtIntAbbrev(operand_bits);
+
+ const dest_bits = dest_scalar_ty.floatBits(target);
+ const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits);
+
+ var fn_name_buf: [64]u8 = undefined;
+ const fn_name = if (operand_scalar_ty.isSignedInt())
+ std.fmt.bufPrintZ(&fn_name_buf, "__float{s}i{s}f", .{
+ compiler_rt_operand_abbrev,
+ compiler_rt_dest_abbrev,
+ }) catch unreachable
+ else
+ std.fmt.bufPrintZ(&fn_name_buf, "__floatun{s}i{s}f", .{
+ compiler_rt_operand_abbrev,
+ compiler_rt_dest_abbrev,
+ }) catch unreachable;
+
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const param_types = [1]*const llvm.Type{operand_llvm_ty};
+ const libc_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
+ const params = [1]*const llvm.Value{operand};
+
+ return self.builder.buildCall(libc_fn, &params, params.len, .C, .Auto, "");
}
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
+ const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+
const operand = try self.resolveInst(ty_op.operand);
- const dest_ty = self.air.typeOfIndex(inst);
- const dest_scalar_ty = dest_ty.scalarType();
+ const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_scalar_ty = operand_ty.scalarType();
+
+ var dest_ty = self.air.typeOfIndex(inst);
+ var dest_scalar_ty = dest_ty.scalarType();
+
+ if (intrinsicsAllowed(operand_scalar_ty, target)) {
+ // TODO set fast math flag
+ const dest_llvm_ty = try self.dg.llvmType(dest_ty);
+ if (dest_scalar_ty.isSignedInt()) {
+ return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
+ } else {
+ return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
+ }
+ }
+
+ const needs_truncating = blk: {
+ const dest_bits = @intCast(u16, dest_scalar_ty.bitSize(target));
+
+ if (!isPowerOfTwo(dest_bits) or dest_bits < 32) {
+ dest_ty = try self.intTypeFromBitsAndSignRounded(dest_bits, dest_scalar_ty.isSignedInt());
+ dest_scalar_ty = dest_ty.scalarType();
+ break :blk true;
+ }
+
+ break :blk false;
+ };
+
const dest_llvm_ty = try self.dg.llvmType(dest_ty);
- // TODO set fast math flag
+ const operand_bits = operand_scalar_ty.floatBits(target);
+ const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
- if (dest_scalar_ty.isSignedInt()) {
- return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
- } else {
- return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
+ const dest_bits = @intCast(u16, dest_scalar_ty.bitSize(target));
+ const compiler_rt_dest_abbrev = compilerRtIntAbbrev(dest_bits);
+
+ var fn_name_buf: [64]u8 = undefined;
+ const fn_name = if (dest_scalar_ty.isSignedInt())
+ std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}f{s}i", .{
+ compiler_rt_operand_abbrev,
+ compiler_rt_dest_abbrev,
+ }) catch unreachable
+ else
+ std.fmt.bufPrintZ(&fn_name_buf, "__fixun{s}f{s}i", .{
+ compiler_rt_operand_abbrev,
+ compiler_rt_dest_abbrev,
+ }) catch unreachable;
+
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const param_types = [1]*const llvm.Type{operand_llvm_ty};
+ const libc_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
+ const params = [1]*const llvm.Value{operand};
+
+ const result = self.builder.buildCall(libc_fn, &params, params.len, .C, .Auto, "");
+
+ if (needs_truncating) {
+ const requested_ty = self.air.typeOfIndex(inst);
+ const requested_llvm_ty = try self.dg.llvmType(requested_ty);
+ return self.builder.buildTrunc(result, requested_llvm_ty, "");
}
+
+ return result;
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
@@ -5615,6 +5740,16 @@ pub const FuncGen = struct {
};
}
+ fn compilerRtIntAbbrev(bits: u16) []const u8 {
+ return switch (bits) {
+ 16 => "h",
+ 32 => "s",
+ 64 => "d",
+ 128 => "t",
+ else => "o", // Non-standard
+ };
+ }
+
/// Creates a floating point comparison by lowering to the appropriate
/// hardware instruction or softfloat routine for the target
fn buildFloatCmp(