aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2022-01-29 03:45:15 -0500
committerGitHub <noreply@github.com>2022-01-29 03:45:15 -0500
commit225910f9341fbc725ff5e0d2c653e29bc2f21cb8 (patch)
treeeac47d40ae555398c46e4ccdff9cace12eeabd3b /src
parent63ee6e662582ee75ac804eb1a4dbdf4457b8f2d0 (diff)
parenta0a71709bc2104c708f045fbb42c6247aff136ac (diff)
downloadzig-225910f9341fbc725ff5e0d2c653e29bc2f21cb8.tar.gz
zig-225910f9341fbc725ff5e0d2c653e29bc2f21cb8.zip
Merge pull request #10639 from Vexu/f80
Add f80
Diffstat (limited to 'src')
-rw-r--r--src/AstGen.zig2
-rw-r--r--src/Sema.zig3
-rw-r--r--src/Zir.zig5
-rw-r--r--src/stage1/all_types.hpp2
-rw-r--r--src/stage1/analyze.cpp13
-rw-r--r--src/stage1/codegen.cpp456
-rw-r--r--src/stage1/ir.cpp262
-rw-r--r--src/stage1/softfloat.hpp4
-rw-r--r--src/stage1/softfloat_ext.cpp38
-rw-r--r--src/stage1/softfloat_ext.hpp4
-rw-r--r--src/stage1/target.cpp11
-rw-r--r--src/stage1/target.hpp1
-rw-r--r--src/type.zig17
-rw-r--r--src/value.zig5
14 files changed, 780 insertions, 43 deletions
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 8328264306..cb6947b7c1 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -7723,6 +7723,7 @@ const primitives = std.ComptimeStringMap(Zir.Inst.Ref, .{
.{ "f16", .f16_type },
.{ "f32", .f32_type },
.{ "f64", .f64_type },
+ .{ "f80", .f80_type },
.{ "false", .bool_false },
.{ "i16", .i16_type },
.{ "i32", .i32_type },
@@ -8732,6 +8733,7 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.f16_type),
as_ty | @enumToInt(Zir.Inst.Ref.f32_type),
as_ty | @enumToInt(Zir.Inst.Ref.f64_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.f80_type),
as_ty | @enumToInt(Zir.Inst.Ref.f128_type),
as_ty | @enumToInt(Zir.Inst.Ref.anyopaque_type),
as_ty | @enumToInt(Zir.Inst.Ref.bool_type),
diff --git a/src/Sema.zig b/src/Sema.zig
index 84907a2044..ac67b3f07f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -16950,6 +16950,7 @@ pub fn typeHasOnePossibleValue(
.f16,
.f32,
.f64,
+ .f80,
.f128,
.c_longdouble,
.comptime_int,
@@ -17227,6 +17228,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
.f16 => return .f16_type,
.f32 => return .f32_type,
.f64 => return .f64_type,
+ .f80 => return .f80_type,
.f128 => return .f128_type,
.anyopaque => return .anyopaque_type,
.bool => return .bool_type,
@@ -17572,6 +17574,7 @@ fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) C
.f16,
.f32,
.f64,
+ .f80,
.f128,
.anyopaque,
.bool,
diff --git a/src/Zir.zig b/src/Zir.zig
index 86819d10f2..1ff103a876 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -1639,6 +1639,7 @@ pub const Inst = struct {
f16_type,
f32_type,
f64_type,
+ f80_type,
f128_type,
anyopaque_type,
bool_type,
@@ -1809,6 +1810,10 @@ pub const Inst = struct {
.ty = Type.initTag(.type),
.val = Value.initTag(.f64_type),
},
+ .f80_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.f80_type),
+ },
.f128_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f128_type),
diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp
index b5a7f07975..38bc3d0c87 100644
--- a/src/stage1/all_types.hpp
+++ b/src/stage1/all_types.hpp
@@ -516,6 +516,7 @@ struct ZigValue {
float16_t x_f16;
float x_f32;
double x_f64;
+ extFloat80_t x_f80;
float128_t x_f128;
bool x_bool;
ConstBoundFnValue x_bound_fn;
@@ -2089,6 +2090,7 @@ struct CodeGen {
ZigType *entry_f16;
ZigType *entry_f32;
ZigType *entry_f64;
+ ZigType *entry_f80;
ZigType *entry_f128;
ZigType *entry_void;
ZigType *entry_unreachable;
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index ff68198fc3..09fd41e24d 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -5647,6 +5647,9 @@ static uint32_t hash_combine_const_val(uint32_t hash_val, ZigValue *const_val) {
case 16: return hash_combine(hash_val, &const_val->data.x_f16);
case 32: return hash_combine(hash_val, &const_val->data.x_f32);
case 64: return hash_combine(hash_val, &const_val->data.x_f64);
+ case 80:
+ hash_val = hash_combine(hash_val, &const_val->data.x_f80.signExp);
+ return hash_combine(hash_val, &const_val->data.x_f80.signif);
case 128: return hash_combine(hash_val, &const_val->data.x_f128);
default: zig_unreachable();
}
@@ -6325,6 +6328,7 @@ void init_const_float(ZigValue *const_val, ZigType *type, double value) {
case 64:
const_val->data.x_f64 = value;
break;
+ case 80:
case 128:
// if we need this, we should add a function that accepts a float128_t param
zig_unreachable();
@@ -7218,6 +7222,8 @@ bool const_values_equal(CodeGen *g, ZigValue *a, ZigValue *b) {
return a->data.x_f32 == b->data.x_f32;
case 64:
return a->data.x_f64 == b->data.x_f64;
+ case 80:
+ return extF80M_eq(&a->data.x_f80, &b->data.x_f80);
case 128:
return f128M_eq(&a->data.x_f128, &b->data.x_f128);
default:
@@ -7470,6 +7476,13 @@ void render_const_value(CodeGen *g, Buf *buf, ZigValue *const_val) {
case 64:
buf_appendf(buf, "%f", const_val->data.x_f64);
return;
+ case 80: {
+ float64_t f64_value = extF80M_to_f64(&const_val->data.x_f80);
+ double double_value;
+ memcpy(&double_value, &f64_value, sizeof(double));
+ buf_appendf(buf, "%f", double_value);
+ return;
+ }
case 128:
{
const size_t extra_len = 100;
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index f9f37c2eb4..b97f009d62 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -1598,6 +1598,81 @@ static LLVMValueRef gen_assert_zero(CodeGen *g, LLVMValueRef expr_val, ZigType *
return nullptr;
}
+
+static LLVMValueRef gen_soft_f80_widen_or_shorten(CodeGen *g, ZigType *actual_type,
+ ZigType *wanted_type, LLVMValueRef expr_val)
+{
+ ZigType *scalar_actual_type = (actual_type->id == ZigTypeIdVector) ?
+ actual_type->data.vector.elem_type : actual_type;
+ ZigType *scalar_wanted_type = (wanted_type->id == ZigTypeIdVector) ?
+ wanted_type->data.vector.elem_type : wanted_type;
+ uint64_t actual_bits = scalar_actual_type->data.floating.bit_count;
+ uint64_t wanted_bits = scalar_wanted_type->data.floating.bit_count;
+
+
+ LLVMTypeRef param_type;
+ LLVMTypeRef return_type;
+ const char *func_name;
+
+ if (actual_bits == wanted_bits) {
+ return expr_val;
+ } else if (actual_bits == 80) {
+ param_type = g->builtin_types.entry_f80->llvm_type;
+ switch (wanted_bits) {
+ case 16:
+ return_type = g->builtin_types.entry_f16->llvm_type;
+ func_name = "__truncxfhf2";
+ break;
+ case 32:
+ return_type = g->builtin_types.entry_f32->llvm_type;
+ func_name = "__truncxfff2";
+ break;
+ case 64:
+ return_type = g->builtin_types.entry_f64->llvm_type;
+ func_name = "__truncxfdf2";
+ break;
+ case 128:
+ return_type = g->builtin_types.entry_f128->llvm_type;
+ func_name = "__extendxftf2";
+ break;
+ default:
+ zig_unreachable();
+ }
+ } else if (wanted_bits == 80) {
+ return_type = g->builtin_types.entry_f80->llvm_type;
+ switch (actual_bits) {
+ case 16:
+ param_type = g->builtin_types.entry_f16->llvm_type;
+ func_name = "__extendhfxf2";
+ break;
+ case 32:
+ param_type = g->builtin_types.entry_f32->llvm_type;
+ func_name = "__extendffxf2";
+ break;
+ case 64:
+ param_type = g->builtin_types.entry_f64->llvm_type;
+ func_name = "__extenddfxf2";
+ break;
+ case 128:
+ param_type = g->builtin_types.entry_f128->llvm_type;
+ func_name = "__trunctfxf2";
+ break;
+ default:
+ zig_unreachable();
+ }
+ } else {
+ zig_unreachable();
+ }
+
+ LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, func_name);
+ if (func_ref == nullptr) {
+ LLVMTypeRef fn_type = LLVMFunctionType(return_type, &param_type, 1, false);
+ func_ref = LLVMAddFunction(g->module, func_name, fn_type);
+ }
+
+ return LLVMBuildCall(g->builder, func_ref, &expr_val, 1, "");
+}
+
static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, ZigType *actual_type,
ZigType *wanted_type, LLVMValueRef expr_val)
{
@@ -1612,6 +1687,13 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, Z
uint64_t actual_bits;
uint64_t wanted_bits;
if (scalar_actual_type->id == ZigTypeIdFloat) {
+
+ if ((scalar_actual_type == g->builtin_types.entry_f80
+ || scalar_wanted_type == g->builtin_types.entry_f80)
+ && !target_has_f80(g->zig_target))
+ {
+ return gen_soft_f80_widen_or_shorten(g, actual_type, wanted_type, expr_val);
+ }
actual_bits = scalar_actual_type->data.floating.bit_count;
wanted_bits = scalar_wanted_type->data.floating.bit_count;
} else if (scalar_actual_type->id == ZigTypeIdInt) {
@@ -3142,6 +3224,187 @@ static void gen_shift_rhs_check(CodeGen *g, ZigType *lhs_type, ZigType *rhs_type
}
}
+static LLVMValueRef get_soft_f80_bin_op_func(CodeGen *g, const char *name, int param_count, LLVMTypeRef return_type) {
+ LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, name);
+ if (existing_llvm_fn != nullptr) return existing_llvm_fn;
+
+ LLVMTypeRef float_type_ref = g->builtin_types.entry_f80->llvm_type;
+ LLVMTypeRef param_types[2] = { float_type_ref, float_type_ref };
+ LLVMTypeRef fn_type = LLVMFunctionType(return_type, param_types, param_count, false);
+ return LLVMAddFunction(g->module, name, fn_type);
+}
+
+static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable,
+ Stage1AirInstBinOp *bin_op_instruction)
+{
+ IrBinOp op_id = bin_op_instruction->op_id;
+ Stage1AirInst *op1 = bin_op_instruction->op1;
+ Stage1AirInst *op2 = bin_op_instruction->op2;
+ uint32_t vector_len = op1->value->type->id == ZigTypeIdVector ? op1->value->type->data.vector.len : 0;
+
+ LLVMValueRef op1_value = ir_llvm_value(g, op1);
+ LLVMValueRef op2_value = ir_llvm_value(g, op2);
+
+ bool div_exact_safety_check = false;
+ LLVMTypeRef return_type = g->builtin_types.entry_f80->llvm_type;
+ int param_count = 2;
+ const char *func_name;
+ switch (op_id) {
+ case IrBinOpInvalid:
+ case IrBinOpArrayCat:
+ case IrBinOpArrayMult:
+ case IrBinOpRemUnspecified:
+ case IrBinOpBitShiftLeftLossy:
+ case IrBinOpBitShiftLeftExact:
+ case IrBinOpBitShiftRightLossy:
+ case IrBinOpBitShiftRightExact:
+ case IrBinOpBoolOr:
+ case IrBinOpBoolAnd:
+ case IrBinOpMultWrap:
+ case IrBinOpAddWrap:
+ case IrBinOpSubWrap:
+ case IrBinOpBinOr:
+ case IrBinOpBinXor:
+ case IrBinOpBinAnd:
+ case IrBinOpAddSat:
+ case IrBinOpSubSat:
+ case IrBinOpMultSat:
+ case IrBinOpShlSat:
+ zig_unreachable();
+ case IrBinOpCmpEq:
+ return_type = g->builtin_types.entry_i32->llvm_type;
+ func_name = "__eqxf2";
+ break;
+ case IrBinOpCmpNotEq:
+ return_type = g->builtin_types.entry_i32->llvm_type;
+ func_name = "__nexf2";
+ break;
+ case IrBinOpCmpLessOrEq:
+ case IrBinOpCmpLessThan:
+ return_type = g->builtin_types.entry_i32->llvm_type;
+ func_name = "__lexf2";
+ break;
+ case IrBinOpCmpGreaterOrEq:
+ case IrBinOpCmpGreaterThan:
+ return_type = g->builtin_types.entry_i32->llvm_type;
+ func_name = "__gexf2";
+ break;
+ case IrBinOpMaximum:
+ func_name = "__fmaxx";
+ break;
+ case IrBinOpMinimum:
+ func_name = "__fminx";
+ break;
+ case IrBinOpMult:
+ func_name = "__mulxf3";
+ break;
+ case IrBinOpAdd:
+ func_name = "__addxf3";
+ break;
+ case IrBinOpSub:
+ func_name = "__subxf3";
+ break;
+ case IrBinOpDivUnspecified:
+ func_name = "__divxf3";
+ break;
+ case IrBinOpDivExact:
+ func_name = "__divxf3";
+ div_exact_safety_check = bin_op_instruction->safety_check_on &&
+ ir_want_runtime_safety(g, &bin_op_instruction->base);
+ break;
+ case IrBinOpDivTrunc:
+ param_count = 1;
+ func_name = "__truncx";
+ break;
+ case IrBinOpDivFloor:
+ param_count = 1;
+ func_name = "__floorx";
+ break;
+ case IrBinOpRemRem:
+ param_count = 1;
+ func_name = "__remx";
+ break;
+ case IrBinOpRemMod:
+ param_count = 1;
+ func_name = "__modx";
+ break;
+ default:
+ zig_unreachable();
+ }
+
+ LLVMValueRef func_ref = get_soft_f80_bin_op_func(g, func_name, param_count, return_type);
+
+ LLVMValueRef result;
+ if (vector_len == 0) {
+ LLVMValueRef params[2] = {op1_value, op2_value};
+ result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
+ } else {
+ result = build_alloca(g, op1->value->type, "", 0);
+ }
+
+ LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type;
+ for (uint32_t i = 0; i < vector_len; i++) {
+ LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false);
+ LLVMValueRef params[2] = {
+ LLVMBuildExtractElement(g->builder, op1_value, index_value, ""),
+ LLVMBuildExtractElement(g->builder, op2_value, index_value, ""),
+ };
+ LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
+ LLVMValueRef new_vector = LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), call_result, index_value, "");
+ LLVMBuildStore(g->builder, new_vector, result);
+ }
+
+ if (div_exact_safety_check) {
+ // Safety check: a / b == floor(a / b)
+ LLVMValueRef floor_func = get_soft_f80_bin_op_func(g, "__floorx", 1, return_type);
+ LLVMValueRef eq_func = get_soft_f80_bin_op_func(g, "__eqxf2", 2, g->builtin_types.entry_i32->llvm_type);
+
+ LLVMValueRef ok_bit;
+ if (vector_len == 0) {
+ LLVMValueRef floored = LLVMBuildCall(g->builder, floor_func, &result, 1, "");
+
+ LLVMValueRef params[2] = {result, floored};
+ ok_bit = LLVMBuildCall(g->builder, eq_func, params, 2, "");
+ } else {
+ ZigType *bool_vec_ty = get_vector_type(g, vector_len, g->builtin_types.entry_bool);
+ ok_bit = build_alloca(g, bool_vec_ty, "", 0);
+ }
+
+ for (uint32_t i = 0; i < vector_len; i++) {
+ LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false);
+ LLVMValueRef div_res = LLVMBuildExtractElement(g->builder,
+ LLVMBuildLoad(g->builder, result, ""), index_value, "");
+
+ LLVMValueRef params[2] = {
+ div_res,
+ LLVMBuildCall(g->builder, floor_func, &div_res, 1, ""),
+ };
+ LLVMValueRef cmp_res = LLVMBuildCall(g->builder, eq_func, params, 2, "");
+ cmp_res = LLVMBuildTrunc(g->builder, cmp_res, g->builtin_types.entry_bool->llvm_type, "");
+ LLVMValueRef new_vector = LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, ok_bit, ""), cmp_res, index_value, "");
+ LLVMBuildStore(g->builder, new_vector, ok_bit);
+ }
+
+ if (vector_len != 0) {
+ ok_bit = ZigLLVMBuildAndReduce(g->builder, LLVMBuildLoad(g->builder, ok_bit, ""));
+ }
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactOk");
+ LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactFail");
+
+ LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, fail_block);
+ gen_safety_crash(g, PanicMsgIdExactDivisionRemainder);
+
+ LLVMPositionBuilderAtEnd(g->builder, ok_block);
+ }
+
+ if (vector_len != 0) {
+ result = LLVMBuildLoad(g->builder, result, "");
+ }
+ return result;
+}
+
static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
Stage1AirInstBinOp *bin_op_instruction)
{
@@ -3151,6 +3414,10 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
ZigType *operand_type = op1->value->type;
ZigType *scalar_type = (operand_type->id == ZigTypeIdVector) ? operand_type->data.vector.elem_type : operand_type;
+ if (scalar_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) {
+ return ir_render_soft_f80_bin_op(g, executable, bin_op_instruction);
+ }
+
bool want_runtime_safety = bin_op_instruction->safety_check_on &&
ir_want_runtime_safety(g, &bin_op_instruction->base);
@@ -3158,7 +3425,6 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
LLVMValueRef op1_value = ir_llvm_value(g, op1);
LLVMValueRef op2_value = ir_llvm_value(g, op2);
-
switch (op_id) {
case IrBinOpInvalid:
case IrBinOpArrayCat:
@@ -5927,7 +6193,7 @@ static LLVMValueRef ir_render_prefetch(CodeGen *g, Stage1Air *executable, Stage1
static_assert(PrefetchCacheInstruction == 0, "");
static_assert(PrefetchCacheData == 1, "");
assert(instruction->cache == PrefetchCacheData || instruction->cache == PrefetchCacheInstruction);
-
+
// LLVM fails during codegen of instruction cache prefetchs for these architectures.
// This is an LLVM bug as the prefetch intrinsic should be a noop if not supported by the target.
// To work around this, simply don't emit llvm.prefetch in this case.
@@ -6622,13 +6888,148 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, Stage1Air *executable,
return nullptr;
}
+static LLVMValueRef ir_render_soft_f80_float_op(CodeGen *g, Stage1Air *executable, Stage1AirInstFloatOp *instruction) {
+ ZigType *op_type = instruction->operand->value->type;
+ uint32_t vector_len = op_type->id == ZigTypeIdVector ? op_type->data.vector.len : 0;
+
+ const char *func_name;
+ switch (instruction->fn_id) {
+ case BuiltinFnIdSqrt:
+ func_name = "__sqrt";
+ break;
+ case BuiltinFnIdSin:
+ func_name = "__sinx";
+ break;
+ case BuiltinFnIdCos:
+ func_name = "__cosx";
+ break;
+ case BuiltinFnIdExp:
+ func_name = "__expx";
+ break;
+ case BuiltinFnIdExp2:
+ func_name = "__exp2x";
+ break;
+ case BuiltinFnIdLog:
+ func_name = "__logx";
+ break;
+ case BuiltinFnIdLog2:
+ func_name = "__log2x";
+ break;
+ case BuiltinFnIdLog10:
+ func_name = "__log10x";
+ break;
+ case BuiltinFnIdFabs:
+ func_name = "__fabsx";
+ break;
+ case BuiltinFnIdFloor:
+ func_name = "__floorx";
+ break;
+ case BuiltinFnIdCeil:
+ func_name = "__ceilx";
+ break;
+ case BuiltinFnIdTrunc:
+ func_name = "__truncx";
+ break;
+ case BuiltinFnIdNearbyInt:
+ func_name = "__nearbyintx";
+ break;
+ case BuiltinFnIdRound:
+ func_name = "__roundx";
+ break;
+ default:
+ zig_unreachable();
+ }
+
+
+ LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, func_name);
+ if (func_ref == nullptr) {
+ LLVMTypeRef f80_ref = g->builtin_types.entry_f80->llvm_type;
+ LLVMTypeRef fn_type = LLVMFunctionType(f80_ref, &f80_ref, 1, false);
+ func_ref = LLVMAddFunction(g->module, func_name, fn_type);
+ }
+
+ LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
+ LLVMValueRef result;
+ if (vector_len == 0) {
+ result = LLVMBuildCall(g->builder, func_ref, &operand, 1, "");
+ } else {
+ result = build_alloca(g, instruction->operand->value->type, "", 0);
+ }
+
+ LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type;
+ for (uint32_t i = 0; i < vector_len; i++) {
+ LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false);
+ LLVMValueRef param = LLVMBuildExtractElement(g->builder, operand, index_value, "");
+ LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, &param, 1, "");
+ LLVMValueRef new_vector = LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), call_result, index_value, "");
+ LLVMBuildStore(g->builder, new_vector, result);
+ }
+ if (vector_len != 0) {
+ result = LLVMBuildLoad(g->builder, result, "");
+ }
+ return result;
+}
+
static LLVMValueRef ir_render_float_op(CodeGen *g, Stage1Air *executable, Stage1AirInstFloatOp *instruction) {
+ ZigType *op_type = instruction->operand->value->type;
+ op_type = op_type->id == ZigTypeIdVector ? op_type->data.vector.elem_type : op_type;
+ if (op_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) {
+ return ir_render_soft_f80_float_op(g, executable, instruction);
+ }
LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
LLVMValueRef fn_val = get_float_fn(g, instruction->base.value->type, ZigLLVMFnIdFloatOp, instruction->fn_id);
return LLVMBuildCall(g->builder, fn_val, &operand, 1, "");
}
+static LLVMValueRef ir_render_soft_f80_mul_add(CodeGen *g, Stage1Air *executable, Stage1AirInstMulAdd *instruction) {
+ ZigType *op_type = instruction->op1->value->type;
+ uint32_t vector_len = op_type->id == ZigTypeIdVector ? op_type->data.vector.len : 0;
+
+ const char *func_name = "__fmax";
+ LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, func_name);
+ if (func_ref == nullptr) {
+ LLVMTypeRef f80_ref = g->builtin_types.entry_f80->llvm_type;
+ LLVMTypeRef params[3] = { f80_ref, f80_ref, f80_ref };
+ LLVMTypeRef fn_type = LLVMFunctionType(f80_ref, params, 3, false);
+ func_ref = LLVMAddFunction(g->module, func_name, fn_type);
+ }
+
+ LLVMValueRef op1 = ir_llvm_value(g, instruction->op1);
+ LLVMValueRef op2 = ir_llvm_value(g, instruction->op2);
+ LLVMValueRef op3 = ir_llvm_value(g, instruction->op3);
+ LLVMValueRef result;
+ if (vector_len == 0) {
+ LLVMValueRef params[3] = { op1, op2, op3 };
+ result = LLVMBuildCall(g->builder, func_ref, params, 3, "");
+ } else {
+ result = build_alloca(g, instruction->op1->value->type, "", 0);
+ }
+
+ LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type;
+ for (uint32_t i = 0; i < vector_len; i++) {
+ LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false);
+
+ LLVMValueRef params[3] = {
+ LLVMBuildExtractElement(g->builder, op1, index_value, ""),
+ LLVMBuildExtractElement(g->builder, op2, index_value, ""),
+ LLVMBuildExtractElement(g->builder, op3, index_value, ""),
+ };
+ LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, 3, "");
+ LLVMValueRef new_vector = LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), call_result, index_value, "");
+ LLVMBuildStore(g->builder, new_vector, result);
+ }
+ if (vector_len != 0) {
+ result = LLVMBuildLoad(g->builder, result, "");
+ }
+ return result;
+}
+
static LLVMValueRef ir_render_mul_add(CodeGen *g, Stage1Air *executable, Stage1AirInstMulAdd *instruction) {
+ ZigType *op_type = instruction->op1->value->type;
+ op_type = op_type->id == ZigTypeIdVector ? op_type->data.vector.elem_type : op_type;
+ if (op_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) {
+ return ir_render_soft_f80_mul_add(g, executable, instruction);
+ }
LLVMValueRef op1 = ir_llvm_value(g, instruction->op1);
LLVMValueRef op2 = ir_llvm_value(g, instruction->op2);
LLVMValueRef op3 = ir_llvm_value(g, instruction->op3);
@@ -7692,20 +8093,33 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n
return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f32);
case 64:
return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f64);
+ case 80: {
+ uint64_t buf[2];
+ memcpy(&buf, &const_val->data.x_f80, 16);
+#if ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN
+ uint64_t tmp = buf[0];
+ buf[0] = buf[1];
+ buf[1] = tmp;
+#endif
+ LLVMValueRef as_i128 = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf);
+ LLVMValueRef as_int = LLVMConstTrunc(as_i128, LLVMIntType(80));
+ return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry));
+ }
case 128:
{
uint64_t buf[2];
- // LLVM seems to require that the lower half of the f128 be placed first in the buffer.
- #if defined(ZIG_BYTE_ORDER) && ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN
- buf[0] = const_val->data.x_f128.v[0];
- buf[1] = const_val->data.x_f128.v[1];
- #elif defined(ZIG_BYTE_ORDER) && ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN
- buf[0] = const_val->data.x_f128.v[1];
- buf[1] = const_val->data.x_f128.v[0];
- #else
- #error Unsupported endian
- #endif
+ // LLVM seems to require that the lower half of the f128 be
+ // placed first in the buffer.
+#if ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN
+ buf[0] = const_val->data.x_f128.v[0];
+ buf[1] = const_val->data.x_f128.v[1];
+#elif ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN
+ buf[0] = const_val->data.x_f128.v[1];
+ buf[1] = const_val->data.x_f128.v[0];
+#else
+#error Unsupported endian
+#endif
LLVMValueRef as_int = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf);
return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry));
@@ -8911,6 +9325,24 @@ static void define_builtin_types(CodeGen *g) {
add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64);
add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128);
+ if (target_has_f80(g->zig_target)) {
+ add_fp_entry(g, "f80", 80, LLVMX86FP80Type(), &g->builtin_types.entry_f80);
+ } else {
+ ZigType *entry = new_type_table_entry(ZigTypeIdFloat);
+ entry->llvm_type = get_int_type(g, false, 128)->llvm_type;
+ entry->size_in_bits = 8 * LLVMStoreSizeOfType(g->target_data_ref, entry->llvm_type);
+ entry->abi_size = LLVMABISizeOfType(g->target_data_ref, entry->llvm_type);
+ entry->abi_align = 16;
+ buf_init_from_str(&entry->name, "f80");
+ entry->data.floating.bit_count = 80;
+
+ entry->llvm_di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
+ entry->size_in_bits, ZigLLVMEncoding_DW_ATE_unsigned());
+
+ g->builtin_types.entry_f80 = entry;
+ g->primitive_type_table.put(&entry->name, entry);
+ }
+
switch (g->zig_target->arch) {
case ZigLLVM_x86:
case ZigLLVM_x86_64:
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index cc68ce0d3c..1b9e9638e2 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -2688,6 +2688,12 @@ static bool float_has_fraction(ZigValue *const_val) {
return floorf(const_val->data.x_f32) != const_val->data.x_f32;
case 64:
return floor(const_val->data.x_f64) != const_val->data.x_f64;
+ case 80:
+ {
+ extFloat80_t floored;
+ extF80M_roundToInt(&const_val->data.x_f80, softfloat_round_minMag, false, &floored);
+ return !extF80M_eq(&floored, &const_val->data.x_f80);
+ }
case 128:
{
float128_t floored;
@@ -2716,6 +2722,15 @@ static void float_append_buf(Buf *buf, ZigValue *const_val) {
case 64:
buf_appendf(buf, "%f", const_val->data.x_f64);
break;
+ case 80:
+ {
+ float64_t f64_value = extF80M_to_f64(&const_val->data.x_f80);
+ double double_value;
+ memcpy(&double_value, &f64_value, sizeof(double));
+
+ buf_appendf(buf, "%f", double_value);
+ break;
+ }
case 128:
{
// TODO actual implementation
@@ -2772,6 +2787,15 @@ static void float_init_bigint(BigInt *bigint, ZigValue *const_val) {
bigint->is_negative = true;
}
break;
+ case 80:
+ {
+ float128_t f128_value;
+ extF80M_to_f128M(&const_val->data.x_f80, &f128_value);
+ BigFloat tmp_float;
+ bigfloat_init_128(&tmp_float, f128_value);
+ bigint_init_bigfloat(bigint, &tmp_float);
+ }
+ break;
case 128:
{
BigFloat tmp_float;
@@ -2801,8 +2825,11 @@ static void float_init_bigfloat(ZigValue *dest_val, BigFloat *bigfloat) {
case 64:
dest_val->data.x_f64 = bigfloat_to_f64(bigfloat);
break;
- case 80:
- zig_panic("TODO: float_init_bigfloat c_longdouble");
+ case 80: {
+ float128_t f128_value = bigfloat_to_f128(bigfloat);
+ f128M_to_extF80M(&f128_value, &dest_val->data.x_f80);
+ break;
+ }
case 128:
dest_val->data.x_f128 = bigfloat_to_f128(bigfloat);
break;
@@ -2828,6 +2855,9 @@ static void float_init_f16(ZigValue *dest_val, float16_t x) {
case 64:
dest_val->data.x_f64 = zig_f16_to_double(x);
break;
+ case 80:
+ f16_to_extF80M(x, &dest_val->data.x_f80);
+ break;
case 128:
f16_to_f128M(x, &dest_val->data.x_f128);
break;
@@ -2853,6 +2883,12 @@ static void float_init_f32(ZigValue *dest_val, float x) {
case 64:
dest_val->data.x_f64 = x;
break;
+ case 80: {
+ float32_t x_f32;
+ memcpy(&x_f32, &x, sizeof(float));
+ f32_to_extF80M(x_f32, &dest_val->data.x_f80);
+ break;
+ }
case 128:
{
float32_t x_f32;
@@ -2882,6 +2918,12 @@ static void float_init_f64(ZigValue *dest_val, double x) {
case 64:
dest_val->data.x_f64 = x;
break;
+ case 80: {
+ float64_t x_f64;
+ memcpy(&x_f64, &x, sizeof(double));
+ f64_to_extF80M(x_f64, &dest_val->data.x_f80);
+ break;
+ }
case 128:
{
float64_t x_f64;
@@ -2917,6 +2959,9 @@ static void float_init_f128(ZigValue *dest_val, float128_t x) {
memcpy(&dest_val->data.x_f64, &f64_val, sizeof(double));
break;
}
+ case 80:
+ f128M_to_extF80M(&x, &dest_val->data.x_f80);
+ break;
case 128:
{
memcpy(&dest_val->data.x_f128, &x, sizeof(float128_t));
@@ -2944,6 +2989,12 @@ static void float_init_float(ZigValue *dest_val, ZigValue *src_val) {
case 64:
float_init_f64(dest_val, src_val->data.x_f64);
break;
+ case 80: {
+ float128_t f128_value;
+ extF80M_to_f128M(&src_val->data.x_f80, &f128_value);
+ float_init_f128(dest_val, f128_value);
+ break;
+ }
case 128:
float_init_f128(dest_val, src_val->data.x_f128);
break;
@@ -2966,6 +3017,8 @@ static bool float_is_nan(ZigValue *op) {
return op->data.x_f32 != op->data.x_f32;
case 64:
return op->data.x_f64 != op->data.x_f64;
+ case 80:
+ return zig_extF80_isNaN(&op->data.x_f80);
case 128:
return zig_f128_isNaN(&op->data.x_f128);
default:
@@ -3006,6 +3059,14 @@ static Cmp float_cmp(ZigValue *op1, ZigValue *op2) {
} else {
return CmpEQ;
}
+ case 80:
+ if (extF80M_lt(&op1->data.x_f80, &op2->data.x_f80)) {
+ return CmpLT;
+ } else if (extF80M_eq(&op1->data.x_f80, &op2->data.x_f80)) {
+ return CmpEQ;
+ } else {
+ return CmpGT;
+ }
case 128:
if (f128M_lt(&op1->data.x_f128, &op2->data.x_f128)) {
return CmpLT;
@@ -3061,7 +3122,18 @@ static Cmp float_cmp_zero(ZigValue *op) {
} else {
return CmpEQ;
}
- case 128:
+ case 80: {
+ extFloat80_t zero_float;
+ ui32_to_extF80M(0, &zero_float);
+ if (extF80M_lt(&op->data.x_f80, &zero_float)) {
+ return CmpLT;
+ } else if (extF80M_eq(&op->data.x_f80, &zero_float)) {
+ return CmpEQ;
+ } else {
+ return CmpGT;
+ }
+ }
+ case 128: {
float128_t zero_float;
ui32_to_f128M(0, &zero_float);
if (f128M_lt(&op->data.x_f128, &zero_float)) {
@@ -3071,6 +3143,7 @@ static Cmp float_cmp_zero(ZigValue *op) {
} else {
return CmpGT;
}
+ }
default:
zig_unreachable();
}
@@ -3095,6 +3168,9 @@ static void float_add(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = op1->data.x_f64 + op2->data.x_f64;
return;
+ case 80:
+ extF80M_add(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ return;
case 128:
f128M_add(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
@@ -3122,6 +3198,9 @@ static void float_sub(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = op1->data.x_f64 - op2->data.x_f64;
return;
+ case 80:
+ extF80M_sub(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ return;
case 128:
f128M_sub(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
@@ -3149,6 +3228,9 @@ static void float_mul(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = op1->data.x_f64 * op2->data.x_f64;
return;
+ case 80:
+ extF80M_mul(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ return;
case 128:
f128M_mul(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
@@ -3176,6 +3258,9 @@ static void float_div(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = op1->data.x_f64 / op2->data.x_f64;
return;
+ case 80:
+ extF80M_div(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ return;
case 128:
f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
@@ -3204,6 +3289,10 @@ static void float_div_trunc(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = trunc(op1->data.x_f64 / op2->data.x_f64);
return;
+ case 80:
+ extF80M_div(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ extF80M_roundToInt(&out_val->data.x_f80, softfloat_round_minMag, false, &out_val->data.x_f80);
+ return;
case 128:
f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
f128M_roundToInt(&out_val->data.x_f128, softfloat_round_minMag, false, &out_val->data.x_f128);
@@ -3233,6 +3322,10 @@ static void float_div_floor(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = floor(op1->data.x_f64 / op2->data.x_f64);
return;
+ case 80:
+ extF80M_div(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ extF80M_roundToInt(&out_val->data.x_f80, softfloat_round_min, false, &out_val->data.x_f80);
+ return;
case 128:
f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
f128M_roundToInt(&out_val->data.x_f128, softfloat_round_min, false, &out_val->data.x_f128);
@@ -3261,6 +3354,9 @@ static void float_rem(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = fmod(op1->data.x_f64, op2->data.x_f64);
return;
+ case 80:
+ extF80M_rem(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ return;
case 128:
f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
@@ -3290,6 +3386,14 @@ static void zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t*
f128M_sub(a, c, c);
}
+// c = a - b * floor(a / b)
+static void zig_extF80M_mod(const extFloat80_t* a, const extFloat80_t* b, extFloat80_t* c) {
+ extF80M_div(a, b, c);
+ extF80M_roundToInt(c, softfloat_round_min, true, c);
+ extF80M_mul(b, c, c);
+ extF80M_sub(a, c, c);
+}
+
static void float_mod(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
@@ -3306,6 +3410,9 @@ static void float_mod(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
case 64:
out_val->data.x_f64 = fmod(fmod(op1->data.x_f64, op2->data.x_f64) + op2->data.x_f64, op2->data.x_f64);
return;
+ case 80:
+ zig_extF80M_mod(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ return;
case 128:
zig_f128M_mod(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
@@ -3351,6 +3458,15 @@ static void float_max(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
out_val->data.x_f64 = op1->data.x_f64 > op2->data.x_f64 ? op1->data.x_f64 : op2->data.x_f64;
}
return;
+ case 80:
+ if (zig_extF80_isNaN(&op1->data.x_f80)) {
+ out_val->data.x_f80 = op2->data.x_f80;
+ } else if (zig_extF80_isNaN(&op2->data.x_f80)) {
+ out_val->data.x_f80 = op1->data.x_f80;
+ } else {
+ out_val->data.x_f80 = extF80M_lt(&op1->data.x_f80, &op2->data.x_f80) ? op2->data.x_f80 : op1->data.x_f80;
+ }
+ return;
case 128:
if (zig_f128_isNaN(&op1->data.x_f128)) {
out_val->data.x_f128 = op2->data.x_f128;
@@ -3402,6 +3518,15 @@ static void float_min(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
out_val->data.x_f64 = op1->data.x_f32 < op2->data.x_f64 ? op1->data.x_f64 : op2->data.x_f64;
}
return;
+ case 80:
+ if (zig_extF80_isNaN(&op1->data.x_f80)) {
+ out_val->data.x_f80 = op2->data.x_f80;
+ } else if (zig_extF80_isNaN(&op2->data.x_f80)) {
+ out_val->data.x_f80 = op1->data.x_f80;
+ } else {
+ out_val->data.x_f80 = extF80M_lt(&op1->data.x_f80, &op2->data.x_f80) ? op1->data.x_f80 : op2->data.x_f80;
+ }
+ return;
case 128:
if (zig_f128_isNaN(&op1->data.x_f128)) {
out_val->data.x_f128 = op2->data.x_f128;
@@ -3434,6 +3559,9 @@ static void float_negate(ZigValue *out_val, ZigValue *op) {
case 64:
out_val->data.x_f64 = -op->data.x_f64;
return;
+ case 80:
+ extF80M_neg(&op->data.x_f80, &out_val->data.x_f80);
+ return;
case 128:
f128M_neg(&op->data.x_f128, &out_val->data.x_f128);
return;
@@ -3462,6 +3590,9 @@ void float_write_ieee597(ZigValue *op, uint8_t *buf, bool target_is_big_endian)
case 64:
memcpy(buf, &op->data.x_f64, 8);
break;
+ case 80:
+ memcpy(buf, &op->data.x_f80, 16);
+ break;
case 128:
memcpy(buf, &op->data.x_f128, 16);
break;
@@ -3511,6 +3642,9 @@ void float_read_ieee597(ZigValue *val, uint8_t *buf, bool target_is_big_endian)
case 64:
memcpy(&val->data.x_f64, ptr, 8);
return;
+ case 80:
+ memcpy(&val->data.x_f80, ptr, 16);
+ return;
case 128:
memcpy(&val->data.x_f128, ptr, 16);
return;
@@ -3538,8 +3672,12 @@ static void value_to_bigfloat(BigFloat *out, ZigValue *val) {
case 64:
bigfloat_init_64(out, val->data.x_f64);
return;
- case 80:
- zig_panic("TODO: value_to_bigfloat c_longdouble");
+ case 80: {
+ float128_t f128_value;
+ extF80M_to_f128M(&val->data.x_f80, &f128_value);
+ bigfloat_init_128(out, f128_value);
+ return;
+ }
case 128:
bigfloat_init_128(out, val->data.x_f128);
return;
@@ -3628,8 +3766,14 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc
bigfloat_init_64(&orig_bf, tmp);
break;
}
- case 80:
- zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble");
+ case 80: {
+ float128_t tmp = bigfloat_to_f128(&tmp_bf);
+ extFloat80_t tmp80;
+ f128M_to_extF80M(&tmp, &tmp80);
+ extF80M_to_f128M(&tmp80, &tmp);
+ bigfloat_init_128(&orig_bf, tmp);
+ break;
+ }
case 128: {
float128_t tmp = bigfloat_to_f128(&tmp_bf);
bigfloat_init_128(&orig_bf, tmp);
@@ -3673,8 +3817,15 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc
}
break;
}
- case 80:
- zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble");
+ case 80: {
+ float16_t tmp = extF80M_to_f16(&const_val->data.x_f80);
+ extFloat80_t orig;
+ f16_to_extF80M(tmp, &orig);
+ if (extF80M_eq(&orig, &const_val->data.x_f80)) {
+ return true;
+ }
+ break;
+ }
case 128: {
float16_t tmp = f128M_to_f16(&const_val->data.x_f128);
float128_t orig;
@@ -3698,8 +3849,15 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc
}
break;
}
- case 80:
- zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble");
+ case 80: {
+ float32_t tmp = extF80M_to_f32(&const_val->data.x_f80);
+ extFloat80_t orig;
+ f32_to_extF80M(tmp, &orig);
+ if (extF80M_eq(&orig, &const_val->data.x_f80)) {
+ return true;
+ }
+ break;
+ }
case 128: {
float32_t tmp = f128M_to_f32(&const_val->data.x_f128);
float128_t orig;
@@ -3715,8 +3873,15 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc
break;
case 64:
switch (const_val->type->data.floating.bit_count) {
- case 80:
- zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble");
+ case 80: {
+ float64_t tmp = extF80M_to_f64(&const_val->data.x_f80);
+ extFloat80_t orig;
+ f64_to_extF80M(tmp, &orig);
+ if (extF80M_eq(&orig, &const_val->data.x_f80)) {
+ return true;
+ }
+ break;
+ }
case 128: {
float64_t tmp = f128M_to_f64(&const_val->data.x_f128);
float128_t orig;
@@ -3730,9 +3895,17 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc
zig_unreachable();
}
break;
- case 80:
+ case 80: {
assert(const_val->type->data.floating.bit_count == 128);
- zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble");
+ extFloat80_t tmp;
+ f128M_to_extF80M(&const_val->data.x_f128, &tmp);
+ float128_t orig;
+ extF80M_to_f128M(&tmp, &orig);
+ if (f128M_eq(&orig, &const_val->data.x_f128)) {
+ return true;
+ }
+ break;
+ }
case 128:
return true;
default:
@@ -5143,8 +5316,11 @@ static bool eval_const_expr_implicit_cast(IrAnalyze *ira, Scope *scope, AstNode
case 64:
const_val->data.x_f64 = bigfloat_to_f64(&other_val->data.x_bigfloat);
break;
- case 80:
- zig_panic("TODO: eval_const_expr_implicit_cast c_longdouble");
+ case 80: {
+ float128_t tmp = bigfloat_to_f128(&other_val->data.x_bigfloat);
+ f128M_to_extF80M(&tmp, &const_val->data.x_f80);
+ break;
+ }
case 128:
const_val->data.x_f128 = bigfloat_to_f128(&other_val->data.x_bigfloat);
break;
@@ -5172,8 +5348,11 @@ static bool eval_const_expr_implicit_cast(IrAnalyze *ira, Scope *scope, AstNode
case 64:
const_val->data.x_f64 = bigfloat_to_f64(&bigfloat);
break;
- case 80:
- zig_panic("TODO: eval_const_expr_implicit_cast c_longdouble");
+ case 80: {
+ float128_t tmp = bigfloat_to_f128(&bigfloat);
+ f128M_to_extF80M(&tmp, &const_val->data.x_f80);
+ break;
+ }
case 128:
const_val->data.x_f128 = bigfloat_to_f128(&bigfloat);
break;
@@ -18960,6 +19139,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
case 16: return ira->codegen->builtin_types.entry_f16;
case 32: return ira->codegen->builtin_types.entry_f32;
case 64: return ira->codegen->builtin_types.entry_f64;
+ case 80: return ira->codegen->builtin_types.entry_f80;
case 128: return ira->codegen->builtin_types.entry_f128;
}
ir_add_error_node(ira, source_node, buf_sprintf("%d-bit float unsupported", bits));
@@ -21943,6 +22123,8 @@ static void ir_eval_mul_add(IrAnalyze *ira, ZigType *float_type,
case 64:
out_val->data.x_f64 = fma(op1->data.x_f64, op2->data.x_f64, op3->data.x_f64);
break;
+ case 80:
+ zig_panic("compiler bug: TODO: implement 'mulAdd' for type 'f80'. See https://github.com/ziglang/zig/issues/4026");
case 128:
f128M_mulAdd(&op1->data.x_f128, &op2->data.x_f128, &op3->data.x_f128, &out_val->data.x_f128);
break;
@@ -24156,10 +24338,44 @@ static ErrorMsg *ir_eval_float_op(IrAnalyze *ira, Scope *scope, AstNode *source_
}
break;
}
- case 80:
- return ir_add_error_node(ira, source_node,
- buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. See https://github.com/ziglang/zig/issues/4026",
- float_op_to_name(fop), buf_ptr(&float_type->name)));
+ case 80: {
+ extFloat80_t *out = &out_val->data.x_f80;
+ extFloat80_t *in = &op->data.x_f80;
+ switch (fop) {
+ case BuiltinFnIdSqrt:
+ extF80M_sqrt(in, out);
+ break;
+ case BuiltinFnIdFabs:
+ extF80M_abs(in, out);
+ break;
+ case BuiltinFnIdFloor:
+ extF80M_roundToInt(in, softfloat_round_min, false, out);
+ break;
+ case BuiltinFnIdCeil:
+ extF80M_roundToInt(in, softfloat_round_max, false, out);
+ break;
+ case BuiltinFnIdTrunc:
+ extF80M_trunc(in, out);
+ break;
+ case BuiltinFnIdRound:
+ extF80M_roundToInt(in, softfloat_round_near_maxMag, false, out);
+ break;
+ case BuiltinFnIdNearbyInt:
+ case BuiltinFnIdSin:
+ case BuiltinFnIdCos:
+ case BuiltinFnIdExp:
+ case BuiltinFnIdExp2:
+ case BuiltinFnIdLog:
+ case BuiltinFnIdLog10:
+ case BuiltinFnIdLog2:
+ return ir_add_error_node(ira, source_node,
+ buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. See https://github.com/ziglang/zig/issues/4026",
+ float_op_to_name(fop), buf_ptr(&float_type->name)));
+ default:
+ zig_unreachable();
+ }
+ break;
+ }
case 128: {
float128_t *out, *in;
if (float_type->id == ZigTypeIdComptimeFloat) {
diff --git a/src/stage1/softfloat.hpp b/src/stage1/softfloat.hpp
index 0d43292c4d..a0d270d55f 100644
--- a/src/stage1/softfloat.hpp
+++ b/src/stage1/softfloat.hpp
@@ -56,4 +56,8 @@ static inline bool zig_f128_isNaN(float128_t *aPtr) {
|| ((absA64 == UINT64_C(0x7FFF000000000000)) && lo);
}
+static inline bool zig_extF80_isNaN(extFloat80_t *aPtr) {
+ return (aPtr->signExp & 0x7FFF) == 0x7FFF && aPtr->signif & UINT64_C(0x7FFFFFFFFFFFFFFF);
+}
+
#endif
diff --git a/src/stage1/softfloat_ext.cpp b/src/stage1/softfloat_ext.cpp
index d0b8d1a5b3..bb4c134d9e 100644
--- a/src/stage1/softfloat_ext.cpp
+++ b/src/stage1/softfloat_ext.cpp
@@ -28,13 +28,6 @@ void f128M_trunc(const float128_t *aPtr, float128_t *zPtr) {
}
}
-float16_t f16_neg(const float16_t a) {
- union { uint16_t ui; float16_t f; } uA;
- // Toggle the sign bit.
- uA.ui = a.v ^ (UINT16_C(1) << 15);
- return uA.f;
-}
-
void f128M_neg(const float128_t *aPtr, float128_t *zPtr) {
// Toggle the sign bit.
#if ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN
@@ -46,4 +39,33 @@ void f128M_neg(const float128_t *aPtr, float128_t *zPtr) {
#else
#error Unsupported endian
#endif
-} \ No newline at end of file
+}
+
+void extF80M_abs(const extFloat80_t *aPtr, extFloat80_t *zPtr) {
+ // Clear the sign bit.
+ zPtr->signExp = aPtr->signExp & UINT16_C(0x7FFF);
+ zPtr->signif = aPtr->signif;
+}
+
+void extF80M_trunc(const extFloat80_t *aPtr, extFloat80_t *zPtr) {
+ extFloat80_t zero_float;
+ ui32_to_extF80M(0, &zero_float);
+ if (extF80M_lt(aPtr, &zero_float)) {
+ extF80M_roundToInt(aPtr, softfloat_round_max, false, zPtr);
+ } else {
+ extF80M_roundToInt(aPtr, softfloat_round_min, false, zPtr);
+ }
+}
+
+void extF80M_neg(const extFloat80_t *aPtr, extFloat80_t *zPtr) {
+ // Toggle the sign bit.
+ zPtr->signExp = aPtr->signExp ^ UINT16_C(0x8000);
+ zPtr->signif = aPtr->signif;
+}
+
+float16_t f16_neg(const float16_t a) {
+ union { uint16_t ui; float16_t f; } uA;
+ // Toggle the sign bit.
+ uA.ui = a.v ^ (UINT16_C(1) << 15);
+ return uA.f;
+}
diff --git a/src/stage1/softfloat_ext.hpp b/src/stage1/softfloat_ext.hpp
index 42922a5226..4e6fd753c8 100644
--- a/src/stage1/softfloat_ext.hpp
+++ b/src/stage1/softfloat_ext.hpp
@@ -7,6 +7,10 @@ void f128M_abs(const float128_t *aPtr, float128_t *zPtr);
void f128M_trunc(const float128_t *aPtr, float128_t *zPtr);
void f128M_neg(const float128_t *aPtr, float128_t *zPtr);
+void extF80M_abs(const extFloat80_t *aPtr, extFloat80_t *zPtr);
+void extF80M_trunc(const extFloat80_t *aPtr, extFloat80_t *zPtr);
+void extF80M_neg(const extFloat80_t *aPtr, extFloat80_t *zPtr);
+
float16_t f16_neg(const float16_t a);
#endif \ No newline at end of file
diff --git a/src/stage1/target.cpp b/src/stage1/target.cpp
index feb2c7f143..a505b4bd21 100644
--- a/src/stage1/target.cpp
+++ b/src/stage1/target.cpp
@@ -1019,6 +1019,17 @@ bool target_long_double_is_f128(const ZigTarget *target) {
}
}
+bool target_has_f80(const ZigTarget *target) {
+ switch (target->arch) {
+ case ZigLLVM_x86:
+ case ZigLLVM_x86_64:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
bool target_is_riscv(const ZigTarget *target) {
return target->arch == ZigLLVM_riscv32 || target->arch == ZigLLVM_riscv64;
}
diff --git a/src/stage1/target.hpp b/src/stage1/target.hpp
index 6851d88618..2e26033549 100644
--- a/src/stage1/target.hpp
+++ b/src/stage1/target.hpp
@@ -81,6 +81,7 @@ bool target_is_sparc(const ZigTarget *target);
bool target_is_android(const ZigTarget *target);
bool target_has_debug_info(const ZigTarget *target);
bool target_long_double_is_f128(const ZigTarget *target);
+bool target_has_f80(const ZigTarget *target);
uint32_t target_arch_pointer_bit_width(ZigLLVM_ArchType arch);
uint32_t target_arch_largest_atomic_bits(ZigLLVM_ArchType arch);
diff --git a/src/type.zig b/src/type.zig
index 0020ccd7cc..23a741eed0 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -58,6 +58,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.c_longdouble,
=> return .Float,
@@ -833,6 +834,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.bool,
.void,
@@ -1053,6 +1055,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.bool,
.void,
@@ -1371,6 +1374,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.bool,
.void,
@@ -1473,6 +1477,7 @@ pub const Type = extern union {
.f16 => return Value.initTag(.f16_type),
.f32 => return Value.initTag(.f32_type),
.f64 => return Value.initTag(.f64_type),
+ .f80 => return Value.initTag(.f80_type),
.f128 => return Value.initTag(.f128_type),
.bool => return Value.initTag(.bool_type),
.void => return Value.initTag(.void_type),
@@ -1543,6 +1548,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.bool,
.anyerror,
@@ -1858,6 +1864,7 @@ pub const Type = extern union {
.f16 => return 2,
.f32 => return 4,
.f64 => return 8,
+ .f80 => return 16,
.f128 => return 16,
.c_longdouble => return 16,
@@ -2138,6 +2145,7 @@ pub const Type = extern union {
.f16 => return 2,
.f32 => return 4,
.f64 => return 8,
+ .f80 => return 16,
.f128 => return 16,
.c_longdouble => return 16,
@@ -2277,6 +2285,7 @@ pub const Type = extern union {
.i16, .u16, .f16 => 16,
.i32, .u32, .f32 => 32,
.i64, .u64, .f64 => 64,
+ .f80 => 80,
.u128, .i128, .f128 => 128,
.isize,
@@ -3170,6 +3179,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.c_longdouble,
=> true,
@@ -3184,6 +3194,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.c_longdouble,
.comptime_float,
@@ -3200,6 +3211,7 @@ pub const Type = extern union {
.f16 => 16,
.f32 => 32,
.f64 => 64,
+ .f80 => 80,
.f128, .comptime_float => 128,
.c_longdouble => CType.longdouble.sizeInBits(target),
@@ -3340,6 +3352,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.c_longdouble,
.comptime_int,
@@ -3381,6 +3394,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.c_longdouble,
.comptime_int,
@@ -3579,6 +3593,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.anyopaque,
.bool,
@@ -4334,6 +4349,7 @@ pub const Type = extern union {
f16,
f32,
f64,
+ f80,
f128,
anyopaque,
bool,
@@ -4453,6 +4469,7 @@ pub const Type = extern union {
.f16,
.f32,
.f64,
+ .f80,
.f128,
.anyopaque,
.bool,
diff --git a/src/value.zig b/src/value.zig
index 9d9895a6e0..faf4f38e80 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -47,6 +47,7 @@ pub const Value = extern union {
f16_type,
f32_type,
f64_type,
+ f80_type,
f128_type,
anyopaque_type,
bool_type,
@@ -205,6 +206,7 @@ pub const Value = extern union {
.f16_type,
.f32_type,
.f64_type,
+ .f80_type,
.f128_type,
.anyopaque_type,
.bool_type,
@@ -398,6 +400,7 @@ pub const Value = extern union {
.f16_type,
.f32_type,
.f64_type,
+ .f80_type,
.f128_type,
.anyopaque_type,
.bool_type,
@@ -630,6 +633,7 @@ pub const Value = extern union {
.f16_type => return out_stream.writeAll("f16"),
.f32_type => return out_stream.writeAll("f32"),
.f64_type => return out_stream.writeAll("f64"),
+ .f80_type => return out_stream.writeAll("f80"),
.f128_type => return out_stream.writeAll("f128"),
.anyopaque_type => return out_stream.writeAll("anyopaque"),
.bool_type => return out_stream.writeAll("bool"),
@@ -824,6 +828,7 @@ pub const Value = extern union {
.f16_type => Type.initTag(.f16),
.f32_type => Type.initTag(.f32),
.f64_type => Type.initTag(.f64),
+ .f80_type => Type.initTag(.f80),
.f128_type => Type.initTag(.f128),
.anyopaque_type => Type.initTag(.anyopaque),
.bool_type => Type.initTag(.bool),