author     Jakub Konka <kubkon@jakubkonka.com>    2022-06-20 00:26:39 +0200
committer  GitHub <noreply@github.com>            2022-06-20 00:26:39 +0200
commit     74442f35030a9c4f4ff65db01a18e8fb2f2a1ecf (patch)
tree       0a3417d662326ce757cc8d89ca7e19498ead9207
parent     33cf6ef621114daad63d14067b6ff374e664d410 (diff)
parent     1d4dbf8d3c891346e6dc978764e8bce9c85ad044 (diff)
Merge pull request #11847 from ziglang/better-libcompiler_rt
-rw-r--r--  CMakeLists.txt | 123
-rwxr-xr-x  ci/azure/macos_script | 2
-rw-r--r--  lib/compiler_rt.zig | 1057
-rw-r--r--  lib/compiler_rt/absv.zig | 20
-rw-r--r--  lib/compiler_rt/absvdi2.zig | 12
-rw-r--r--  lib/compiler_rt/absvdi2_test.zig | 5
-rw-r--r--  lib/compiler_rt/absvsi2.zig | 12
-rw-r--r--  lib/compiler_rt/absvsi2_test.zig | 5
-rw-r--r--  lib/compiler_rt/absvti2.zig | 12
-rw-r--r--  lib/compiler_rt/absvti2_test.zig | 5
-rw-r--r--  lib/compiler_rt/adddf3.zig | 20
-rw-r--r--  lib/compiler_rt/addf3.zig (renamed from lib/compiler_rt/addXf3.zig) | 86
-rw-r--r--  lib/compiler_rt/addf3_test.zig (renamed from lib/compiler_rt/addXf3_test.zig) | 7
-rw-r--r--  lib/compiler_rt/addo.zig | 10
-rw-r--r--  lib/compiler_rt/addsf3.zig | 20
-rw-r--r--  lib/compiler_rt/addtf3.zig | 26
-rw-r--r--  lib/compiler_rt/addxf3.zig | 12
-rw-r--r--  lib/compiler_rt/arm.zig | 79
-rw-r--r--  lib/compiler_rt/atomics.zig | 2
-rw-r--r--  lib/compiler_rt/aulldiv.zig | 15
-rw-r--r--  lib/compiler_rt/aullrem.zig | 15
-rw-r--r--  lib/compiler_rt/bswap.zig | 10
-rw-r--r--  lib/compiler_rt/ceil.zig | 25
-rw-r--r--  lib/compiler_rt/clear_cache.zig | 9
-rw-r--r--  lib/compiler_rt/cmp.zig | 14
-rw-r--r--  lib/compiler_rt/cmpdf2.zig | 68
-rw-r--r--  lib/compiler_rt/cmpsf2.zig | 68
-rw-r--r--  lib/compiler_rt/cmptf2.zig | 122
-rw-r--r--  lib/compiler_rt/cmpxf2.zig | 50
-rw-r--r--  lib/compiler_rt/common.zig | 190
-rw-r--r--  lib/compiler_rt/compareXf2.zig | 328
-rw-r--r--  lib/compiler_rt/comparedf2_test.zig | 24
-rw-r--r--  lib/compiler_rt/comparef.zig | 118
-rw-r--r--  lib/compiler_rt/comparesf2_test.zig | 24
-rw-r--r--  lib/compiler_rt/cos.zig | 15
-rw-r--r--  lib/compiler_rt/count0bits.zig | 22
-rw-r--r--  lib/compiler_rt/divdf3.zig | 155
-rw-r--r--  lib/compiler_rt/divsf3.zig | 45
-rw-r--r--  lib/compiler_rt/divtf3.zig | 30
-rw-r--r--  lib/compiler_rt/divti3.zig | 46
-rw-r--r--  lib/compiler_rt/divxf3.zig | 14
-rw-r--r--  lib/compiler_rt/emutls.zig | 28
-rw-r--r--  lib/compiler_rt/exp.zig | 15
-rw-r--r--  lib/compiler_rt/exp2.zig | 15
-rw-r--r--  lib/compiler_rt/extend_f80.zig | 131
-rw-r--r--  lib/compiler_rt/extenddftf2.zig | 26
-rw-r--r--  lib/compiler_rt/extenddfxf2.zig | 12
-rw-r--r--  lib/compiler_rt/extendf.zig (renamed from lib/compiler_rt/extendXfYf2.zig) | 112
-rw-r--r--  lib/compiler_rt/extendf_test.zig (renamed from lib/compiler_rt/extendXfYf2_test.zig) | 28
-rw-r--r--  lib/compiler_rt/extendhfsf2.zig | 26
-rw-r--r--  lib/compiler_rt/extendhftf2.zig | 12
-rw-r--r--  lib/compiler_rt/extendhfxf2.zig | 12
-rw-r--r--  lib/compiler_rt/extendsfdf2.zig | 20
-rw-r--r--  lib/compiler_rt/extendsftf2.zig | 26
-rw-r--r--  lib/compiler_rt/extendsfxf2.zig | 12
-rw-r--r--  lib/compiler_rt/extendxftf2.zig | 50
-rw-r--r--  lib/compiler_rt/fabs.zig | 15
-rw-r--r--  lib/compiler_rt/fixXfYi.zig | 224
-rw-r--r--  lib/compiler_rt/fixdfdi.zig | 20
-rw-r--r--  lib/compiler_rt/fixdfsi.zig | 20
-rw-r--r--  lib/compiler_rt/fixdfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixhfdi.zig | 12
-rw-r--r--  lib/compiler_rt/fixhfsi.zig | 12
-rw-r--r--  lib/compiler_rt/fixhfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixsfdi.zig | 20
-rw-r--r--  lib/compiler_rt/fixsfsi.zig | 20
-rw-r--r--  lib/compiler_rt/fixsfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixtfdi.zig | 26
-rw-r--r--  lib/compiler_rt/fixtfsi.zig | 26
-rw-r--r--  lib/compiler_rt/fixtfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixunsdfdi.zig | 20
-rw-r--r--  lib/compiler_rt/fixunsdfsi.zig | 20
-rw-r--r--  lib/compiler_rt/fixunsdfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixunshfdi.zig | 12
-rw-r--r--  lib/compiler_rt/fixunshfsi.zig | 12
-rw-r--r--  lib/compiler_rt/fixunshfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixunssfdi.zig | 20
-rw-r--r--  lib/compiler_rt/fixunssfsi.zig | 20
-rw-r--r--  lib/compiler_rt/fixunssfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixunstfdi.zig | 26
-rw-r--r--  lib/compiler_rt/fixunstfsi.zig | 26
-rw-r--r--  lib/compiler_rt/fixunstfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixunsxfdi.zig | 12
-rw-r--r--  lib/compiler_rt/fixunsxfsi.zig | 12
-rw-r--r--  lib/compiler_rt/fixunsxfti.zig | 12
-rw-r--r--  lib/compiler_rt/fixxfdi.zig | 12
-rw-r--r--  lib/compiler_rt/fixxfsi.zig | 12
-rw-r--r--  lib/compiler_rt/fixxfti.zig | 12
-rw-r--r--  lib/compiler_rt/floatXiYf.zig | 222
-rw-r--r--  lib/compiler_rt/float_to_int.zig | 55
-rw-r--r--  lib/compiler_rt/float_to_int_test.zig (renamed from lib/compiler_rt/fixXfYi_test.zig) | 48
-rw-r--r--  lib/compiler_rt/floatdidf.zig | 20
-rw-r--r--  lib/compiler_rt/floatdihf.zig | 12
-rw-r--r--  lib/compiler_rt/floatdisf.zig | 20
-rw-r--r--  lib/compiler_rt/floatditf.zig | 26
-rw-r--r--  lib/compiler_rt/floatdixf.zig | 12
-rw-r--r--  lib/compiler_rt/floatsidf.zig | 20
-rw-r--r--  lib/compiler_rt/floatsihf.zig | 12
-rw-r--r--  lib/compiler_rt/floatsisf.zig | 20
-rw-r--r--  lib/compiler_rt/floatsitf.zig | 26
-rw-r--r--  lib/compiler_rt/floatsixf.zig | 12
-rw-r--r--  lib/compiler_rt/floattidf.zig | 12
-rw-r--r--  lib/compiler_rt/floattihf.zig | 12
-rw-r--r--  lib/compiler_rt/floattisf.zig | 12
-rw-r--r--  lib/compiler_rt/floattitf.zig | 12
-rw-r--r--  lib/compiler_rt/floattixf.zig | 12
-rw-r--r--  lib/compiler_rt/floatundidf.zig | 20
-rw-r--r--  lib/compiler_rt/floatundihf.zig | 12
-rw-r--r--  lib/compiler_rt/floatundisf.zig | 20
-rw-r--r--  lib/compiler_rt/floatunditf.zig | 26
-rw-r--r--  lib/compiler_rt/floatundixf.zig | 12
-rw-r--r--  lib/compiler_rt/floatunsidf.zig | 20
-rw-r--r--  lib/compiler_rt/floatunsihf.zig | 12
-rw-r--r--  lib/compiler_rt/floatunsisf.zig | 20
-rw-r--r--  lib/compiler_rt/floatunsitf.zig | 26
-rw-r--r--  lib/compiler_rt/floatunsixf.zig | 12
-rw-r--r--  lib/compiler_rt/floatuntidf.zig | 12
-rw-r--r--  lib/compiler_rt/floatuntihf.zig | 12
-rw-r--r--  lib/compiler_rt/floatuntisf.zig | 12
-rw-r--r--  lib/compiler_rt/floatuntitf.zig | 20
-rw-r--r--  lib/compiler_rt/floatuntixf.zig | 12
-rw-r--r--  lib/compiler_rt/floor.zig | 25
-rw-r--r--  lib/compiler_rt/fma.zig | 27
-rw-r--r--  lib/compiler_rt/fmax.zig | 15
-rw-r--r--  lib/compiler_rt/fmin.zig | 15
-rw-r--r--  lib/compiler_rt/fmod.zig | 21
-rw-r--r--  lib/compiler_rt/gedf2.zig | 36
-rw-r--r--  lib/compiler_rt/gesf2.zig | 36
-rw-r--r--  lib/compiler_rt/getf2.zig | 39
-rw-r--r--  lib/compiler_rt/gexf2.zig | 17
-rw-r--r--  lib/compiler_rt/int.zig | 66
-rw-r--r--  lib/compiler_rt/int_to_float.zig | 58
-rw-r--r--  lib/compiler_rt/int_to_float_test.zig (renamed from lib/compiler_rt/floatXiYf_test.zig) | 105
-rw-r--r--  lib/compiler_rt/log.zig | 25
-rw-r--r--  lib/compiler_rt/log10.zig | 25
-rw-r--r--  lib/compiler_rt/log2.zig | 25
-rw-r--r--  lib/compiler_rt/modti3.zig | 53
-rw-r--r--  lib/compiler_rt/muldf3.zig | 20
-rw-r--r--  lib/compiler_rt/muldi3.zig | 46
-rw-r--r--  lib/compiler_rt/mulf3.zig (renamed from lib/compiler_rt/mulXf3.zig) | 191
-rw-r--r--  lib/compiler_rt/mulf3_test.zig (renamed from lib/compiler_rt/mulXf3_test.zig) | 8
-rw-r--r--  lib/compiler_rt/mulo.zig | 13
-rw-r--r--  lib/compiler_rt/mulsf3.zig | 20
-rw-r--r--  lib/compiler_rt/multf3.zig | 26
-rw-r--r--  lib/compiler_rt/multi3.zig | 53
-rw-r--r--  lib/compiler_rt/mulxf3.zig | 12
-rw-r--r--  lib/compiler_rt/negXf2.zig | 30
-rw-r--r--  lib/compiler_rt/negXi2.zig | 30
-rw-r--r--  lib/compiler_rt/negv.zig | 42
-rw-r--r--  lib/compiler_rt/os_version_check.zig | 53
-rw-r--r--  lib/compiler_rt/parity.zig | 39
-rw-r--r--  lib/compiler_rt/popcount.zig | 47
-rw-r--r--  lib/compiler_rt/round.zig | 25
-rw-r--r--  lib/compiler_rt/shift.zig | 60
-rw-r--r--  lib/compiler_rt/sin.zig | 25
-rw-r--r--  lib/compiler_rt/sincos.zig | 19
-rw-r--r--  lib/compiler_rt/sparc.zig | 114
-rw-r--r--  lib/compiler_rt/sqrt.zig | 15
-rw-r--r--  lib/compiler_rt/stack_probe.zig | 53
-rw-r--r--  lib/compiler_rt/subdf3.zig | 21
-rw-r--r--  lib/compiler_rt/subo.zig | 39
-rw-r--r--  lib/compiler_rt/subsf3.zig | 21
-rw-r--r--  lib/compiler_rt/subtf3.zig | 30
-rw-r--r--  lib/compiler_rt/subxf3.zig | 15
-rw-r--r--  lib/compiler_rt/tan.zig | 28
-rw-r--r--  lib/compiler_rt/trunc.zig | 25
-rw-r--r--  lib/compiler_rt/trunc_f80.zig | 173
-rw-r--r--  lib/compiler_rt/truncdfhf2.zig | 20
-rw-r--r--  lib/compiler_rt/truncdfsf2.zig | 20
-rw-r--r--  lib/compiler_rt/truncf.zig (renamed from lib/compiler_rt/truncXfYf2.zig) | 133
-rw-r--r--  lib/compiler_rt/truncf_test.zig (renamed from lib/compiler_rt/truncXfYf2_test.zig) | 32
-rw-r--r--  lib/compiler_rt/truncsfhf2.zig | 26
-rw-r--r--  lib/compiler_rt/trunctfdf2.zig | 26
-rw-r--r--  lib/compiler_rt/trunctfhf2.zig | 12
-rw-r--r--  lib/compiler_rt/trunctfsf2.zig | 26
-rw-r--r--  lib/compiler_rt/trunctfxf2.zig | 66
-rw-r--r--  lib/compiler_rt/truncxfdf2.zig | 12
-rw-r--r--  lib/compiler_rt/truncxfhf2.zig | 12
-rw-r--r--  lib/compiler_rt/truncxfsf2.zig | 12
-rw-r--r--  lib/compiler_rt/udivmodti4.zig | 33
-rw-r--r--  lib/compiler_rt/udivti3.zig | 39
-rw-r--r--  lib/compiler_rt/umodti3.zig | 44
-rw-r--r--  lib/compiler_rt/unorddf2.zig | 20
-rw-r--r--  lib/compiler_rt/unordsf2.zig | 20
-rw-r--r--  lib/compiler_rt/unordtf2.zig | 23
-rw-r--r--  src/Compilation.zig | 158
-rw-r--r--  src/ThreadPool.zig | 81
-rw-r--r--  src/WaitGroup.zig | 7
-rw-r--r--  src/link.zig | 7
-rw-r--r--  src/link/Coff.zig | 2
-rw-r--r--  src/link/Elf.zig | 2
-rw-r--r--  src/link/MachO.zig | 50
-rw-r--r--  src/link/Wasm.zig | 2
-rw-r--r--  src/musl.zig | 1
194 files changed, 4564 insertions, 3182 deletions
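
The file list above reflects the main point of #11847: the monolithic lib/compiler_rt.zig, which previously contained hundreds of centralized @export calls (visible in the removed hunks below), is split so that each builtin family lives in its own lib/compiler_rt/*.zig file and registers its own exports against shared settings in lib/compiler_rt/common.zig. As a rough sketch of what one of the new per-symbol files presumably looks like after this split (the common.linkage constant, the addf3 generic helper, and the exact file contents are assumptions inferred from the removed code below, not copied from this diff):

const common = @import("./common.zig");
const addf3 = @import("./addf3.zig").addf3;

comptime {
    // Assumed layout: each file exports its own builtin, with linkage resolved
    // in common.zig (presumably Weak normally, Internal under test), replacing
    // the single comptime export block this commit deletes from lib/compiler_rt.zig.
    @export(__addsf3, .{ .name = "__addsf3", .linkage = common.linkage });
}

pub fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
    return addf3(f32, a, b);
}

The new lib/compiler_rt.zig shown in the diff then only re-exports common.panic and references the per-symbol files, which is also why the CMakeLists.txt hunk below now lists each of those files individually.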
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cc962fcff1..29f521c789 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -480,8 +480,15 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/sort.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/absv.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addXf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvdi2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvsi2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvti2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/adddf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addo.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addtf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/arm.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/atomics.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/aulldiv.zig"
@@ -490,7 +497,12 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/ceil.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/clear_cache.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmp.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/compareXf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmptf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/common.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/comparef.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cos.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/count0bits.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/divdf3.zig"
@@ -501,25 +513,101 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/emutls.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp2.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendXfYf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extend_f80.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendxftf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fabs.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixXfYi.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatXiYf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/float_to_int.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatditf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunditf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntixf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floor.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fma.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmax.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmin.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmod.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gedf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gesf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/getf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gexf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/int.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/int_to_float.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log10.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/modti3.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulXf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldi3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulo.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/multf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/multi3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negXf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negXi2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negv.zig"
@@ -533,19 +621,34 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/shift.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/sin.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/sincos.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/sparc.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/sqrt.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/stack_probe.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subdf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/subo.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subtf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/tan.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trig.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunc.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncXfYf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunc_f80.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncdfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncdfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncsfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmod.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmodti4.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivti3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/umodti3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unorddf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordtf2.zig"
"${CMAKE_SOURCE_DIR}/lib/std/start.zig"
"${CMAKE_SOURCE_DIR}/lib/std/std.zig"
"${CMAKE_SOURCE_DIR}/lib/std/target.zig"
diff --git a/ci/azure/macos_script b/ci/azure/macos_script
index 9e32e7803e..e958dc28de 100755
--- a/ci/azure/macos_script
+++ b/ci/azure/macos_script
@@ -75,7 +75,7 @@ release/bin/zig build test-translate-c -Denable-macos-sdk
release/bin/zig build test-run-translated-c -Denable-macos-sdk
release/bin/zig build docs -Denable-macos-sdk
release/bin/zig build test-fmt -Denable-macos-sdk
-release/bin/zig build test-cases -Denable-macos-sdk
+release/bin/zig build test-cases -Denable-macos-sdk -Dsingle-threaded
if [ "${BUILD_REASON}" != "PullRequest" ]; then
mv ../LICENSE release/
diff --git a/lib/compiler_rt.zig b/lib/compiler_rt.zig
index 563d3d0820..5f199e9eae 100644
--- a/lib/compiler_rt.zig
+++ b/lib/compiler_rt.zig
@@ -1,885 +1,182 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const os_tag = builtin.os.tag;
-const arch = builtin.cpu.arch;
-const abi = builtin.abi;
-
-const is_gnu = abi.isGnu();
-const is_mingw = os_tag == .windows and is_gnu;
-const is_darwin = std.Target.Os.Tag.isDarwin(os_tag);
-const is_ppc = arch.isPPC() or arch.isPPC64();
-
-const linkage = if (is_test)
- std.builtin.GlobalLinkage.Internal
-else
- std.builtin.GlobalLinkage.Weak;
-
-const strong_linkage = if (is_test)
- std.builtin.GlobalLinkage.Internal
-else
- std.builtin.GlobalLinkage.Strong;
+pub const panic = @import("compiler_rt/common.zig").panic;
comptime {
- // These files do their own comptime exporting logic.
_ = @import("compiler_rt/atomics.zig");
- if (builtin.zig_backend != .stage2_llvm) { // TODO
- _ = @import("compiler_rt/clear_cache.zig").clear_cache;
- }
-
- const __extenddftf2 = @import("compiler_rt/extendXfYf2.zig").__extenddftf2;
- @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = linkage });
- const __extendsftf2 = @import("compiler_rt/extendXfYf2.zig").__extendsftf2;
- @export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = linkage });
- const __extendhfsf2 = @import("compiler_rt/extendXfYf2.zig").__extendhfsf2;
- @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = linkage });
- const __extendhftf2 = @import("compiler_rt/extendXfYf2.zig").__extendhftf2;
- @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = linkage });
-
- const __extendhfxf2 = @import("compiler_rt/extend_f80.zig").__extendhfxf2;
- @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = linkage });
- const __extendsfxf2 = @import("compiler_rt/extend_f80.zig").__extendsfxf2;
- @export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = linkage });
- const __extenddfxf2 = @import("compiler_rt/extend_f80.zig").__extenddfxf2;
- @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = linkage });
- const __extendxftf2 = @import("compiler_rt/extend_f80.zig").__extendxftf2;
- @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = linkage });
-
- const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2;
- @export(__lesf2, .{ .name = "__lesf2", .linkage = linkage });
- const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2;
- @export(__ledf2, .{ .name = "__ledf2", .linkage = linkage });
- const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2;
- @export(__letf2, .{ .name = "__letf2", .linkage = linkage });
- const __lexf2 = @import("compiler_rt/compareXf2.zig").__lexf2;
- @export(__lexf2, .{ .name = "__lexf2", .linkage = linkage });
-
- const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2;
- @export(__gesf2, .{ .name = "__gesf2", .linkage = linkage });
- const __gedf2 = @import("compiler_rt/compareXf2.zig").__gedf2;
- @export(__gedf2, .{ .name = "__gedf2", .linkage = linkage });
- const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2;
- @export(__getf2, .{ .name = "__getf2", .linkage = linkage });
- const __gexf2 = @import("compiler_rt/compareXf2.zig").__gexf2;
- @export(__gexf2, .{ .name = "__gexf2", .linkage = linkage });
-
- const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2;
- @export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage });
- const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2;
- @export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage });
- const __eqxf2 = @import("compiler_rt/compareXf2.zig").__eqxf2;
- @export(__eqxf2, .{ .name = "__eqxf2", .linkage = linkage });
-
- const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2;
- @export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage });
- const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2;
- @export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage });
- const __ltxf2 = @import("compiler_rt/compareXf2.zig").__ltxf2;
- @export(__ltxf2, .{ .name = "__ltxf2", .linkage = linkage });
-
- const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2;
- @export(__nesf2, .{ .name = "__nesf2", .linkage = linkage });
- const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2;
- @export(__nedf2, .{ .name = "__nedf2", .linkage = linkage });
- const __nexf2 = @import("compiler_rt/compareXf2.zig").__nexf2;
- @export(__nexf2, .{ .name = "__nexf2", .linkage = linkage });
-
- const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2;
- @export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage });
- const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2;
- @export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage });
- const __gtxf2 = @import("compiler_rt/compareXf2.zig").__gtxf2;
- @export(__gtxf2, .{ .name = "__gtxf2", .linkage = linkage });
-
- if (!is_test) {
- @export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage });
- @export(__ledf2, .{ .name = "__cmpdf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__cmptf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__eqtf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__lttf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gttf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__netf2", .linkage = linkage });
- @export(__extendhfsf2, .{ .name = "__gnu_h2f_ieee", .linkage = linkage });
- }
-
- if (builtin.os.tag == .windows) {
- // Default stack-probe functions emitted by LLVM
- if (is_mingw) {
- const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
- @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
- const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms;
- @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
- } else if (!builtin.link_libc) {
- // This symbols are otherwise exported by MSVCRT.lib
- const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
- @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
- const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
- @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
- }
-
- switch (arch) {
- .i386 => {
- const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
- @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
- @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- },
- .x86_64 => {
- // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
- // that LLVM expects compiler-rt to have.
- const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64;
- @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage });
- const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64;
- @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage });
- const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64;
- @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64;
- @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64;
- @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64;
- @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage });
- },
- else => {},
- }
- if (arch.isAARCH64()) {
- const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
- @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
- const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- }
- } else {
- const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
- @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
- @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- }
-
- const __truncdfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfhf2;
- @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = linkage });
- const __trunctfhf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfhf2;
- @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = linkage });
- const __trunctfdf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfdf2;
- @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = linkage });
- const __trunctfsf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfsf2;
- @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = linkage });
-
- const __truncdfsf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfsf2;
- @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage });
-
- const __truncxfhf2 = @import("compiler_rt/trunc_f80.zig").__truncxfhf2;
- @export(__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = linkage });
- const __truncxfsf2 = @import("compiler_rt/trunc_f80.zig").__truncxfsf2;
- @export(__truncxfsf2, .{ .name = "__truncxfsf2", .linkage = linkage });
- const __truncxfdf2 = @import("compiler_rt/trunc_f80.zig").__truncxfdf2;
- @export(__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = linkage });
- const __trunctfxf2 = @import("compiler_rt/trunc_f80.zig").__trunctfxf2;
- @export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = linkage });
-
- switch (arch) {
- .i386,
- .x86_64,
- => {
- const zig_probe_stack = @import("compiler_rt/stack_probe.zig").zig_probe_stack;
- @export(zig_probe_stack, .{
- .name = "__zig_probe_stack",
- .linkage = linkage,
- });
- },
- else => {},
- }
-
- const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2;
- @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage });
- const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2;
- @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage });
- const __unordtf2 = @import("compiler_rt/compareXf2.zig").__unordtf2;
- @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage });
-
- const __addsf3 = @import("compiler_rt/addXf3.zig").__addsf3;
- @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage });
- const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3;
- @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
- const __addxf3 = @import("compiler_rt/addXf3.zig").__addxf3;
- @export(__addxf3, .{ .name = "__addxf3", .linkage = linkage });
- const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
- @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
-
- const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3;
- @export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
- const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3;
- @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
- const __subxf3 = @import("compiler_rt/addXf3.zig").__subxf3;
- @export(__subxf3, .{ .name = "__subxf3", .linkage = linkage });
- const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
- @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
-
- const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3;
- @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
- const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3;
- @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage });
- const __mulxf3 = @import("compiler_rt/mulXf3.zig").__mulxf3;
- @export(__mulxf3, .{ .name = "__mulxf3", .linkage = linkage });
- const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3;
- @export(__multf3, .{ .name = "__multf3", .linkage = linkage });
-
- const __divsf3 = @import("compiler_rt/divsf3.zig").__divsf3;
- @export(__divsf3, .{ .name = "__divsf3", .linkage = linkage });
- const __divdf3 = @import("compiler_rt/divdf3.zig").__divdf3;
- @export(__divdf3, .{ .name = "__divdf3", .linkage = linkage });
- const __divxf3 = @import("compiler_rt/divxf3.zig").__divxf3;
- @export(__divxf3, .{ .name = "__divxf3", .linkage = linkage });
- const __divtf3 = @import("compiler_rt/divtf3.zig").__divtf3;
- @export(__divtf3, .{ .name = "__divtf3", .linkage = linkage });
-
- // Integer Bit operations
- const __clzsi2 = @import("compiler_rt/count0bits.zig").__clzsi2;
- @export(__clzsi2, .{ .name = "__clzsi2", .linkage = linkage });
- const __clzdi2 = @import("compiler_rt/count0bits.zig").__clzdi2;
- @export(__clzdi2, .{ .name = "__clzdi2", .linkage = linkage });
- const __clzti2 = @import("compiler_rt/count0bits.zig").__clzti2;
- @export(__clzti2, .{ .name = "__clzti2", .linkage = linkage });
- const __ctzsi2 = @import("compiler_rt/count0bits.zig").__ctzsi2;
- @export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = linkage });
- const __ctzdi2 = @import("compiler_rt/count0bits.zig").__ctzdi2;
- @export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = linkage });
- const __ctzti2 = @import("compiler_rt/count0bits.zig").__ctzti2;
- @export(__ctzti2, .{ .name = "__ctzti2", .linkage = linkage });
- const __ffssi2 = @import("compiler_rt/count0bits.zig").__ffssi2;
- @export(__ffssi2, .{ .name = "__ffssi2", .linkage = linkage });
- const __ffsdi2 = @import("compiler_rt/count0bits.zig").__ffsdi2;
- @export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = linkage });
- const __ffsti2 = @import("compiler_rt/count0bits.zig").__ffsti2;
- @export(__ffsti2, .{ .name = "__ffsti2", .linkage = linkage });
- const __paritysi2 = @import("compiler_rt/parity.zig").__paritysi2;
- @export(__paritysi2, .{ .name = "__paritysi2", .linkage = linkage });
- const __paritydi2 = @import("compiler_rt/parity.zig").__paritydi2;
- @export(__paritydi2, .{ .name = "__paritydi2", .linkage = linkage });
- const __parityti2 = @import("compiler_rt/parity.zig").__parityti2;
- @export(__parityti2, .{ .name = "__parityti2", .linkage = linkage });
- const __popcountsi2 = @import("compiler_rt/popcount.zig").__popcountsi2;
- @export(__popcountsi2, .{ .name = "__popcountsi2", .linkage = linkage });
- const __popcountdi2 = @import("compiler_rt/popcount.zig").__popcountdi2;
- @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = linkage });
- const __popcountti2 = @import("compiler_rt/popcount.zig").__popcountti2;
- @export(__popcountti2, .{ .name = "__popcountti2", .linkage = linkage });
- const __bswapsi2 = @import("compiler_rt/bswap.zig").__bswapsi2;
- @export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = linkage });
- const __bswapdi2 = @import("compiler_rt/bswap.zig").__bswapdi2;
- @export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = linkage });
- const __bswapti2 = @import("compiler_rt/bswap.zig").__bswapti2;
- @export(__bswapti2, .{ .name = "__bswapti2", .linkage = linkage });
-
- // Integral -> Float Conversion
-
- // Conversion to f32
- const __floatsisf = @import("compiler_rt/floatXiYf.zig").__floatsisf;
- @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage });
- const __floatunsisf = @import("compiler_rt/floatXiYf.zig").__floatunsisf;
- @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage });
-
- const __floatundisf = @import("compiler_rt/floatXiYf.zig").__floatundisf;
- @export(__floatundisf, .{ .name = "__floatundisf", .linkage = linkage });
- const __floatdisf = @import("compiler_rt/floatXiYf.zig").__floatdisf;
- @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage });
-
- const __floattisf = @import("compiler_rt/floatXiYf.zig").__floattisf;
- @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage });
- const __floatuntisf = @import("compiler_rt/floatXiYf.zig").__floatuntisf;
- @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage });
-
- // Conversion to f64
- const __floatsidf = @import("compiler_rt/floatXiYf.zig").__floatsidf;
- @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage });
- const __floatunsidf = @import("compiler_rt/floatXiYf.zig").__floatunsidf;
- @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage });
-
- const __floatdidf = @import("compiler_rt/floatXiYf.zig").__floatdidf;
- @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage });
- const __floatundidf = @import("compiler_rt/floatXiYf.zig").__floatundidf;
- @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage });
-
- const __floattidf = @import("compiler_rt/floatXiYf.zig").__floattidf;
- @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage });
- const __floatuntidf = @import("compiler_rt/floatXiYf.zig").__floatuntidf;
- @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage });
-
- // Conversion to f80
- const __floatsixf = @import("compiler_rt/floatXiYf.zig").__floatsixf;
- @export(__floatsixf, .{ .name = "__floatsixf", .linkage = linkage });
- const __floatunsixf = @import("compiler_rt/floatXiYf.zig").__floatunsixf;
- @export(__floatunsixf, .{ .name = "__floatunsixf", .linkage = linkage });
-
- const __floatdixf = @import("compiler_rt/floatXiYf.zig").__floatdixf;
- @export(__floatdixf, .{ .name = "__floatdixf", .linkage = linkage });
- const __floatundixf = @import("compiler_rt/floatXiYf.zig").__floatundixf;
- @export(__floatundixf, .{ .name = "__floatundixf", .linkage = linkage });
-
- const __floattixf = @import("compiler_rt/floatXiYf.zig").__floattixf;
- @export(__floattixf, .{ .name = "__floattixf", .linkage = linkage });
- const __floatuntixf = @import("compiler_rt/floatXiYf.zig").__floatuntixf;
- @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = linkage });
-
- // Conversion to f128
- const __floatsitf = @import("compiler_rt/floatXiYf.zig").__floatsitf;
- @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage });
- const __floatunsitf = @import("compiler_rt/floatXiYf.zig").__floatunsitf;
- @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage });
-
- const __floatditf = @import("compiler_rt/floatXiYf.zig").__floatditf;
- @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage });
- const __floatunditf = @import("compiler_rt/floatXiYf.zig").__floatunditf;
- @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage });
-
- const __floattitf = @import("compiler_rt/floatXiYf.zig").__floattitf;
- @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage });
- const __floatuntitf = @import("compiler_rt/floatXiYf.zig").__floatuntitf;
- @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage });
-
- // Float -> Integral Conversion
-
- // Conversion from f32
- const __fixsfsi = @import("compiler_rt/fixXfYi.zig").__fixsfsi;
- @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage });
- const __fixunssfsi = @import("compiler_rt/fixXfYi.zig").__fixunssfsi;
- @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage });
-
- const __fixsfdi = @import("compiler_rt/fixXfYi.zig").__fixsfdi;
- @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage });
- const __fixunssfdi = @import("compiler_rt/fixXfYi.zig").__fixunssfdi;
- @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage });
-
- const __fixsfti = @import("compiler_rt/fixXfYi.zig").__fixsfti;
- @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage });
- const __fixunssfti = @import("compiler_rt/fixXfYi.zig").__fixunssfti;
- @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage });
-
- // Conversion from f64
- const __fixdfsi = @import("compiler_rt/fixXfYi.zig").__fixdfsi;
- @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage });
- const __fixunsdfsi = @import("compiler_rt/fixXfYi.zig").__fixunsdfsi;
- @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage });
-
- const __fixdfdi = @import("compiler_rt/fixXfYi.zig").__fixdfdi;
- @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage });
- const __fixunsdfdi = @import("compiler_rt/fixXfYi.zig").__fixunsdfdi;
- @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage });
-
- const __fixdfti = @import("compiler_rt/fixXfYi.zig").__fixdfti;
- @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage });
- const __fixunsdfti = @import("compiler_rt/fixXfYi.zig").__fixunsdfti;
- @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage });
-
- // Conversion from f80
- const __fixxfsi = @import("compiler_rt/fixXfYi.zig").__fixxfsi;
- @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = linkage });
- const __fixunsxfsi = @import("compiler_rt/fixXfYi.zig").__fixunsxfsi;
- @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = linkage });
-
- const __fixxfdi = @import("compiler_rt/fixXfYi.zig").__fixxfdi;
- @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = linkage });
- const __fixunsxfdi = @import("compiler_rt/fixXfYi.zig").__fixunsxfdi;
- @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = linkage });
-
- const __fixxfti = @import("compiler_rt/fixXfYi.zig").__fixxfti;
- @export(__fixxfti, .{ .name = "__fixxfti", .linkage = linkage });
- const __fixunsxfti = @import("compiler_rt/fixXfYi.zig").__fixunsxfti;
- @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = linkage });
-
- // Conversion from f128
- const __fixtfsi = @import("compiler_rt/fixXfYi.zig").__fixtfsi;
- @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage });
- const __fixunstfsi = @import("compiler_rt/fixXfYi.zig").__fixunstfsi;
- @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage });
-
- const __fixtfdi = @import("compiler_rt/fixXfYi.zig").__fixtfdi;
- @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage });
- const __fixunstfdi = @import("compiler_rt/fixXfYi.zig").__fixunstfdi;
- @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage });
-
- const __fixtfti = @import("compiler_rt/fixXfYi.zig").__fixtfti;
- @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage });
- const __fixunstfti = @import("compiler_rt/fixXfYi.zig").__fixunstfti;
- @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage });
-
- const __udivmoddi4 = @import("compiler_rt/int.zig").__udivmoddi4;
- @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = linkage });
-
- const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2;
- @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage });
- if (!is_test) {
- @export(__truncsfhf2, .{ .name = "__gnu_f2h_ieee", .linkage = linkage });
- }
- const __extendsfdf2 = @import("compiler_rt/extendXfYf2.zig").__extendsfdf2;
- @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage });
-
- if (is_darwin) {
- const __isPlatformVersionAtLeast = @import("compiler_rt/os_version_check.zig").__isPlatformVersionAtLeast;
- @export(__isPlatformVersionAtLeast, .{ .name = "__isPlatformVersionAtLeast", .linkage = linkage });
- }
-
- // Integer Arithmetic
- const __ashldi3 = @import("compiler_rt/shift.zig").__ashldi3;
- @export(__ashldi3, .{ .name = "__ashldi3", .linkage = linkage });
- const __ashlti3 = @import("compiler_rt/shift.zig").__ashlti3;
- @export(__ashlti3, .{ .name = "__ashlti3", .linkage = linkage });
- const __ashrdi3 = @import("compiler_rt/shift.zig").__ashrdi3;
- @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = linkage });
- const __ashrti3 = @import("compiler_rt/shift.zig").__ashrti3;
- @export(__ashrti3, .{ .name = "__ashrti3", .linkage = linkage });
- const __lshrdi3 = @import("compiler_rt/shift.zig").__lshrdi3;
- @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = linkage });
- const __lshrti3 = @import("compiler_rt/shift.zig").__lshrti3;
- @export(__lshrti3, .{ .name = "__lshrti3", .linkage = linkage });
- const __negsi2 = @import("compiler_rt/negXi2.zig").__negsi2;
- @export(__negsi2, .{ .name = "__negsi2", .linkage = linkage });
- const __negdi2 = @import("compiler_rt/negXi2.zig").__negdi2;
- @export(__negdi2, .{ .name = "__negdi2", .linkage = linkage });
- const __negti2 = @import("compiler_rt/negXi2.zig").__negti2;
- @export(__negti2, .{ .name = "__negti2", .linkage = linkage });
-
- const __mulsi3 = @import("compiler_rt/int.zig").__mulsi3;
- @export(__mulsi3, .{ .name = "__mulsi3", .linkage = linkage });
- const __muldi3 = @import("compiler_rt/muldi3.zig").__muldi3;
- @export(__muldi3, .{ .name = "__muldi3", .linkage = linkage });
- const __divmoddi4 = @import("compiler_rt/int.zig").__divmoddi4;
- @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = linkage });
- const __divsi3 = @import("compiler_rt/int.zig").__divsi3;
- @export(__divsi3, .{ .name = "__divsi3", .linkage = linkage });
- const __divdi3 = @import("compiler_rt/int.zig").__divdi3;
- @export(__divdi3, .{ .name = "__divdi3", .linkage = linkage });
- const __udivsi3 = @import("compiler_rt/int.zig").__udivsi3;
- @export(__udivsi3, .{ .name = "__udivsi3", .linkage = linkage });
- const __udivdi3 = @import("compiler_rt/int.zig").__udivdi3;
- @export(__udivdi3, .{ .name = "__udivdi3", .linkage = linkage });
- const __modsi3 = @import("compiler_rt/int.zig").__modsi3;
- @export(__modsi3, .{ .name = "__modsi3", .linkage = linkage });
- const __moddi3 = @import("compiler_rt/int.zig").__moddi3;
- @export(__moddi3, .{ .name = "__moddi3", .linkage = linkage });
- const __umodsi3 = @import("compiler_rt/int.zig").__umodsi3;
- @export(__umodsi3, .{ .name = "__umodsi3", .linkage = linkage });
- const __umoddi3 = @import("compiler_rt/int.zig").__umoddi3;
- @export(__umoddi3, .{ .name = "__umoddi3", .linkage = linkage });
- const __divmodsi4 = @import("compiler_rt/int.zig").__divmodsi4;
- @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = linkage });
- const __udivmodsi4 = @import("compiler_rt/int.zig").__udivmodsi4;
- @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = linkage });
-
- // Integer Arithmetic with trapping overflow
- const __absvsi2 = @import("compiler_rt/absv.zig").__absvsi2;
- @export(__absvsi2, .{ .name = "__absvsi2", .linkage = linkage });
- const __absvdi2 = @import("compiler_rt/absv.zig").__absvdi2;
- @export(__absvdi2, .{ .name = "__absvdi2", .linkage = linkage });
- const __absvti2 = @import("compiler_rt/absv.zig").__absvti2;
- @export(__absvti2, .{ .name = "__absvti2", .linkage = linkage });
- const __negvsi2 = @import("compiler_rt/negv.zig").__negvsi2;
- @export(__negvsi2, .{ .name = "__negvsi2", .linkage = linkage });
- const __negvdi2 = @import("compiler_rt/negv.zig").__negvdi2;
- @export(__negvdi2, .{ .name = "__negvdi2", .linkage = linkage });
- const __negvti2 = @import("compiler_rt/negv.zig").__negvti2;
- @export(__negvti2, .{ .name = "__negvti2", .linkage = linkage });
-
- // Integer arithmetic which returns if overflow
- const __addosi4 = @import("compiler_rt/addo.zig").__addosi4;
- @export(__addosi4, .{ .name = "__addosi4", .linkage = linkage });
- const __addodi4 = @import("compiler_rt/addo.zig").__addodi4;
- @export(__addodi4, .{ .name = "__addodi4", .linkage = linkage });
- const __addoti4 = @import("compiler_rt/addo.zig").__addoti4;
- @export(__addoti4, .{ .name = "__addoti4", .linkage = linkage });
- const __subosi4 = @import("compiler_rt/subo.zig").__subosi4;
- @export(__subosi4, .{ .name = "__subosi4", .linkage = linkage });
- const __subodi4 = @import("compiler_rt/subo.zig").__subodi4;
- @export(__subodi4, .{ .name = "__subodi4", .linkage = linkage });
- const __suboti4 = @import("compiler_rt/subo.zig").__suboti4;
- @export(__suboti4, .{ .name = "__suboti4", .linkage = linkage });
- const __mulosi4 = @import("compiler_rt/mulo.zig").__mulosi4;
- @export(__mulosi4, .{ .name = "__mulosi4", .linkage = linkage });
- const __mulodi4 = @import("compiler_rt/mulo.zig").__mulodi4;
- @export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage });
- const __muloti4 = @import("compiler_rt/mulo.zig").__muloti4;
- @export(__muloti4, .{ .name = "__muloti4", .linkage = linkage });
-
- // Integer Comparison
- // (a < b) => 0
- // (a == b) => 1
- // (a > b) => 2
- const __cmpsi2 = @import("compiler_rt/cmp.zig").__cmpsi2;
- @export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = linkage });
- const __cmpdi2 = @import("compiler_rt/cmp.zig").__cmpdi2;
- @export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = linkage });
- const __cmpti2 = @import("compiler_rt/cmp.zig").__cmpti2;
- @export(__cmpti2, .{ .name = "__cmpti2", .linkage = linkage });
- const __ucmpsi2 = @import("compiler_rt/cmp.zig").__ucmpsi2;
- @export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = linkage });
- const __ucmpdi2 = @import("compiler_rt/cmp.zig").__ucmpdi2;
- @export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = linkage });
- const __ucmpti2 = @import("compiler_rt/cmp.zig").__ucmpti2;
- @export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = linkage });
-
- // missing: Floating point raised to integer power
-
- // missing: Complex arithmetic
- // (a + ib) * (c + id)
- // (a + ib) / (c + id)
-
- const __negsf2 = @import("compiler_rt/negXf2.zig").__negsf2;
- @export(__negsf2, .{ .name = "__negsf2", .linkage = linkage });
- const __negdf2 = @import("compiler_rt/negXf2.zig").__negdf2;
- @export(__negdf2, .{ .name = "__negdf2", .linkage = linkage });
-
- if (builtin.link_libc and os_tag == .openbsd) {
- const __emutls_get_address = @import("compiler_rt/emutls.zig").__emutls_get_address;
- @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = linkage });
- }
-
- if ((arch.isARM() or arch.isThumb()) and !is_test) {
- const __aeabi_unwind_cpp_pr0 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr0;
- @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = linkage });
- const __aeabi_unwind_cpp_pr1 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr1;
- @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = linkage });
- const __aeabi_unwind_cpp_pr2 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr2;
- @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = linkage });
-
- @export(__muldi3, .{ .name = "__aeabi_lmul", .linkage = linkage });
-
- const __aeabi_ldivmod = @import("compiler_rt/arm.zig").__aeabi_ldivmod;
- @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = linkage });
- const __aeabi_uldivmod = @import("compiler_rt/arm.zig").__aeabi_uldivmod;
- @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = linkage });
-
- @export(__divsi3, .{ .name = "__aeabi_idiv", .linkage = linkage });
- const __aeabi_idivmod = @import("compiler_rt/arm.zig").__aeabi_idivmod;
- @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = linkage });
- @export(__udivsi3, .{ .name = "__aeabi_uidiv", .linkage = linkage });
- const __aeabi_uidivmod = @import("compiler_rt/arm.zig").__aeabi_uidivmod;
- @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = linkage });
-
- const __aeabi_memcpy = @import("compiler_rt/arm.zig").__aeabi_memcpy;
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = linkage });
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy4", .linkage = linkage });
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy8", .linkage = linkage });
-
- const __aeabi_memmove = @import("compiler_rt/arm.zig").__aeabi_memmove;
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = linkage });
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove4", .linkage = linkage });
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove8", .linkage = linkage });
-
- const __aeabi_memset = @import("compiler_rt/arm.zig").__aeabi_memset;
- @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = linkage });
- @export(__aeabi_memset, .{ .name = "__aeabi_memset4", .linkage = linkage });
- @export(__aeabi_memset, .{ .name = "__aeabi_memset8", .linkage = linkage });
-
- const __aeabi_memclr = @import("compiler_rt/arm.zig").__aeabi_memclr;
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = linkage });
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr4", .linkage = linkage });
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr8", .linkage = linkage });
-
- if (os_tag == .linux) {
- const __aeabi_read_tp = @import("compiler_rt/arm.zig").__aeabi_read_tp;
- @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = linkage });
- }
-
- const __aeabi_f2d = @import("compiler_rt/extendXfYf2.zig").__aeabi_f2d;
- @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage });
- const __aeabi_i2d = @import("compiler_rt/floatXiYf.zig").__aeabi_i2d;
- @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage });
- const __aeabi_l2d = @import("compiler_rt/floatXiYf.zig").__aeabi_l2d;
- @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage });
- const __aeabi_l2f = @import("compiler_rt/floatXiYf.zig").__aeabi_l2f;
- @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage });
- const __aeabi_ui2d = @import("compiler_rt/floatXiYf.zig").__aeabi_ui2d;
- @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage });
- const __aeabi_ul2d = @import("compiler_rt/floatXiYf.zig").__aeabi_ul2d;
- @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage });
- const __aeabi_ui2f = @import("compiler_rt/floatXiYf.zig").__aeabi_ui2f;
- @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage });
- const __aeabi_ul2f = @import("compiler_rt/floatXiYf.zig").__aeabi_ul2f;
- @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage });
-
- const __aeabi_fneg = @import("compiler_rt/negXf2.zig").__aeabi_fneg;
- @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = linkage });
- const __aeabi_dneg = @import("compiler_rt/negXf2.zig").__aeabi_dneg;
- @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = linkage });
-
- const __aeabi_fmul = @import("compiler_rt/mulXf3.zig").__aeabi_fmul;
- @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = linkage });
- const __aeabi_dmul = @import("compiler_rt/mulXf3.zig").__aeabi_dmul;
- @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage });
-
- const __aeabi_d2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2h;
- @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage });
-
- const __aeabi_f2ulz = @import("compiler_rt/fixXfYi.zig").__aeabi_f2ulz;
- @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage });
- const __aeabi_d2ulz = @import("compiler_rt/fixXfYi.zig").__aeabi_d2ulz;
- @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage });
-
- const __aeabi_f2lz = @import("compiler_rt/fixXfYi.zig").__aeabi_f2lz;
- @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage });
- const __aeabi_d2lz = @import("compiler_rt/fixXfYi.zig").__aeabi_d2lz;
- @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage });
-
- const __aeabi_d2uiz = @import("compiler_rt/fixXfYi.zig").__aeabi_d2uiz;
- @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage });
-
- const __aeabi_h2f = @import("compiler_rt/extendXfYf2.zig").__aeabi_h2f;
- @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage });
- const __aeabi_f2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_f2h;
- @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage });
-
- const __aeabi_i2f = @import("compiler_rt/floatXiYf.zig").__aeabi_i2f;
- @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage });
- const __aeabi_d2f = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2f;
- @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage });
-
- const __aeabi_fadd = @import("compiler_rt/addXf3.zig").__aeabi_fadd;
- @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage });
- const __aeabi_dadd = @import("compiler_rt/addXf3.zig").__aeabi_dadd;
- @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage });
- const __aeabi_fsub = @import("compiler_rt/addXf3.zig").__aeabi_fsub;
- @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage });
- const __aeabi_dsub = @import("compiler_rt/addXf3.zig").__aeabi_dsub;
- @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage });
-
- const __aeabi_f2uiz = @import("compiler_rt/fixXfYi.zig").__aeabi_f2uiz;
- @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage });
-
- const __aeabi_f2iz = @import("compiler_rt/fixXfYi.zig").__aeabi_f2iz;
- @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage });
- const __aeabi_d2iz = @import("compiler_rt/fixXfYi.zig").__aeabi_d2iz;
- @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage });
-
- const __aeabi_fdiv = @import("compiler_rt/divsf3.zig").__aeabi_fdiv;
- @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = linkage });
- const __aeabi_ddiv = @import("compiler_rt/divdf3.zig").__aeabi_ddiv;
- @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = linkage });
-
- const __aeabi_llsl = @import("compiler_rt/shift.zig").__aeabi_llsl;
- @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = linkage });
- const __aeabi_lasr = @import("compiler_rt/shift.zig").__aeabi_lasr;
- @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = linkage });
- const __aeabi_llsr = @import("compiler_rt/shift.zig").__aeabi_llsr;
- @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = linkage });
-
- const __aeabi_fcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpeq;
- @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = linkage });
- const __aeabi_fcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmplt;
- @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage });
- const __aeabi_fcmple = @import("compiler_rt/compareXf2.zig").__aeabi_fcmple;
- @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage });
- const __aeabi_fcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpge;
- @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage });
- const __aeabi_fcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpgt;
- @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage });
- const __aeabi_fcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpun;
- @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage });
-
- const __aeabi_dcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpeq;
- @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage });
- const __aeabi_dcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmplt;
- @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage });
- const __aeabi_dcmple = @import("compiler_rt/compareXf2.zig").__aeabi_dcmple;
- @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage });
- const __aeabi_dcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpge;
- @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = linkage });
- const __aeabi_dcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpgt;
- @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage });
- const __aeabi_dcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpun;
- @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage });
- }
-
- if (arch == .i386 and abi == .msvc) {
- // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
- const _alldiv = @import("compiler_rt/aulldiv.zig")._alldiv;
- @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = strong_linkage });
- const _aulldiv = @import("compiler_rt/aulldiv.zig")._aulldiv;
- @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = strong_linkage });
- const _allrem = @import("compiler_rt/aullrem.zig")._allrem;
- @export(_allrem, .{ .name = "\x01__allrem", .linkage = strong_linkage });
- const _aullrem = @import("compiler_rt/aullrem.zig")._aullrem;
- @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = strong_linkage });
- }
-
- mathExport("ceil", @import("./compiler_rt/ceil.zig"));
- mathExport("cos", @import("./compiler_rt/cos.zig"));
- mathExport("exp", @import("./compiler_rt/exp.zig"));
- mathExport("exp2", @import("./compiler_rt/exp2.zig"));
- mathExport("fabs", @import("./compiler_rt/fabs.zig"));
- mathExport("floor", @import("./compiler_rt/floor.zig"));
- mathExport("fma", @import("./compiler_rt/fma.zig"));
- mathExport("fmax", @import("./compiler_rt/fmax.zig"));
- mathExport("fmin", @import("./compiler_rt/fmin.zig"));
- mathExport("fmod", @import("./compiler_rt/fmod.zig"));
- mathExport("log", @import("./compiler_rt/log.zig"));
- mathExport("log10", @import("./compiler_rt/log10.zig"));
- mathExport("log2", @import("./compiler_rt/log2.zig"));
- mathExport("round", @import("./compiler_rt/round.zig"));
- mathExport("sin", @import("./compiler_rt/sin.zig"));
- mathExport("sincos", @import("./compiler_rt/sincos.zig"));
- mathExport("sqrt", @import("./compiler_rt/sqrt.zig"));
- mathExport("tan", @import("./compiler_rt/tan.zig"));
- mathExport("trunc", @import("./compiler_rt/trunc.zig"));
-
- if (arch.isSPARC()) {
- // SPARC systems use a different naming scheme
- const _Qp_add = @import("compiler_rt/sparc.zig")._Qp_add;
- @export(_Qp_add, .{ .name = "_Qp_add", .linkage = linkage });
- const _Qp_div = @import("compiler_rt/sparc.zig")._Qp_div;
- @export(_Qp_div, .{ .name = "_Qp_div", .linkage = linkage });
- const _Qp_mul = @import("compiler_rt/sparc.zig")._Qp_mul;
- @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = linkage });
- const _Qp_sub = @import("compiler_rt/sparc.zig")._Qp_sub;
- @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = linkage });
-
- const _Qp_cmp = @import("compiler_rt/sparc.zig")._Qp_cmp;
- @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = linkage });
- const _Qp_feq = @import("compiler_rt/sparc.zig")._Qp_feq;
- @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = linkage });
- const _Qp_fne = @import("compiler_rt/sparc.zig")._Qp_fne;
- @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = linkage });
- const _Qp_flt = @import("compiler_rt/sparc.zig")._Qp_flt;
- @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = linkage });
- const _Qp_fle = @import("compiler_rt/sparc.zig")._Qp_fle;
- @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = linkage });
- const _Qp_fgt = @import("compiler_rt/sparc.zig")._Qp_fgt;
- @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage });
- const _Qp_fge = @import("compiler_rt/sparc.zig")._Qp_fge;
- @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage });
-
- const _Qp_itoq = @import("compiler_rt/sparc.zig")._Qp_itoq;
- @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage });
- const _Qp_uitoq = @import("compiler_rt/sparc.zig")._Qp_uitoq;
- @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage });
- const _Qp_xtoq = @import("compiler_rt/sparc.zig")._Qp_xtoq;
- @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage });
- const _Qp_uxtoq = @import("compiler_rt/sparc.zig")._Qp_uxtoq;
- @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage });
- const _Qp_stoq = @import("compiler_rt/sparc.zig")._Qp_stoq;
- @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage });
- const _Qp_dtoq = @import("compiler_rt/sparc.zig")._Qp_dtoq;
- @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage });
- const _Qp_qtoi = @import("compiler_rt/sparc.zig")._Qp_qtoi;
- @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage });
- const _Qp_qtoui = @import("compiler_rt/sparc.zig")._Qp_qtoui;
- @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage });
- const _Qp_qtox = @import("compiler_rt/sparc.zig")._Qp_qtox;
- @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage });
- const _Qp_qtoux = @import("compiler_rt/sparc.zig")._Qp_qtoux;
- @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage });
- const _Qp_qtos = @import("compiler_rt/sparc.zig")._Qp_qtos;
- @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage });
- const _Qp_qtod = @import("compiler_rt/sparc.zig")._Qp_qtod;
- @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage });
- }
-
- if (is_ppc and !is_test) {
- @export(__addtf3, .{ .name = "__addkf3", .linkage = linkage });
- @export(__subtf3, .{ .name = "__subkf3", .linkage = linkage });
- @export(__multf3, .{ .name = "__mulkf3", .linkage = linkage });
- @export(__divtf3, .{ .name = "__divkf3", .linkage = linkage });
- @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = linkage });
- @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = linkage });
- @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = linkage });
- @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = linkage });
- @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = linkage });
- @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = linkage });
- @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = linkage });
- @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = linkage });
- @export(__floatsitf, .{ .name = "__floatsikf", .linkage = linkage });
- @export(__floatditf, .{ .name = "__floatdikf", .linkage = linkage });
- @export(__floatunditf, .{ .name = "__floatundikf", .linkage = linkage });
- @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = linkage });
- @export(__floatuntitf, .{ .name = "__floatuntikf", .linkage = linkage });
-
- @export(__letf2, .{ .name = "__eqkf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__nekf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gekf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__ltkf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__lekf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gtkf2", .linkage = linkage });
- @export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
- }
-}
-
-inline fn mathExport(double_name: []const u8, comptime import: type) void {
- const half_name = "__" ++ double_name ++ "h";
- const half_fn = @field(import, half_name);
- const float_name = double_name ++ "f";
- const float_fn = @field(import, float_name);
- const double_fn = @field(import, double_name);
- const long_double_name = double_name ++ "l";
- const xf80_name = "__" ++ double_name ++ "x";
- const xf80_fn = @field(import, xf80_name);
- const quad_name = double_name ++ "q";
- const quad_fn = @field(import, quad_name);
-
- @export(half_fn, .{ .name = half_name, .linkage = linkage });
- @export(float_fn, .{ .name = float_name, .linkage = linkage });
- @export(double_fn, .{ .name = double_name, .linkage = linkage });
- @export(xf80_fn, .{ .name = xf80_name, .linkage = linkage });
- @export(quad_fn, .{ .name = quad_name, .linkage = linkage });
-
- if (is_test) return;
-
- const pairs = .{
- .{ f16, half_fn },
- .{ f32, float_fn },
- .{ f64, double_fn },
- .{ f80, xf80_fn },
- .{ f128, quad_fn },
- };
-
- if (builtin.os.tag == .windows) {
- // Weak aliases don't work on Windows, so we have to provide the 'l' variants
- // as additional function definitions that jump to the real definition.
- const long_double_fn = @field(import, long_double_name);
- @export(long_double_fn, .{ .name = long_double_name, .linkage = linkage });
- } else {
- inline for (pairs) |pair| {
- const F = pair[0];
- const func = pair[1];
- if (builtin.target.longDoubleIs(F)) {
- @export(func, .{ .name = long_double_name, .linkage = linkage });
- }
- }
- }
-
- if (is_ppc) {
- // LLVM PPC backend lowers f128 ops with the suffix `f128` instead of `l`.
- @export(quad_fn, .{ .name = double_name ++ "f128", .linkage = linkage });
- }
-}
-// Avoid dragging in the runtime safety mechanisms into this .o file,
-// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
- _ = error_return_trace;
- @setCold(true);
- if (is_test) {
- std.debug.panic("{s}", .{msg});
- } else {
- unreachable;
- }
+ _ = @import("compiler_rt/addf3.zig");
+ _ = @import("compiler_rt/addsf3.zig");
+ _ = @import("compiler_rt/addtf3.zig");
+ _ = @import("compiler_rt/addxf3.zig");
+ _ = @import("compiler_rt/subdf3.zig");
+ _ = @import("compiler_rt/subsf3.zig");
+ _ = @import("compiler_rt/subtf3.zig");
+ _ = @import("compiler_rt/subxf3.zig");
+
+ _ = @import("compiler_rt/mulf3.zig");
+ _ = @import("compiler_rt/muldf3.zig");
+ _ = @import("compiler_rt/mulsf3.zig");
+ _ = @import("compiler_rt/multf3.zig");
+ _ = @import("compiler_rt/mulxf3.zig");
+
+ _ = @import("compiler_rt/comparef.zig");
+ _ = @import("compiler_rt/cmpsf2.zig");
+ _ = @import("compiler_rt/cmpdf2.zig");
+ _ = @import("compiler_rt/cmptf2.zig");
+ _ = @import("compiler_rt/cmpxf2.zig");
+ _ = @import("compiler_rt/gesf2.zig");
+ _ = @import("compiler_rt/gedf2.zig");
+ _ = @import("compiler_rt/getf2.zig");
+ _ = @import("compiler_rt/gexf2.zig");
+ _ = @import("compiler_rt/unordsf2.zig");
+ _ = @import("compiler_rt/unorddf2.zig");
+ _ = @import("compiler_rt/unordtf2.zig");
+
+ _ = @import("compiler_rt/extendf.zig");
+ _ = @import("compiler_rt/extenddftf2.zig");
+ _ = @import("compiler_rt/extenddfxf2.zig");
+ _ = @import("compiler_rt/extendhfsf2.zig");
+ _ = @import("compiler_rt/extendhftf2.zig");
+ _ = @import("compiler_rt/extendhfxf2.zig");
+ _ = @import("compiler_rt/extendsfdf2.zig");
+ _ = @import("compiler_rt/extendsftf2.zig");
+ _ = @import("compiler_rt/extendsfxf2.zig");
+ _ = @import("compiler_rt/extendxftf2.zig");
+
+ _ = @import("compiler_rt/truncf.zig");
+ _ = @import("compiler_rt/truncsfhf2.zig");
+ _ = @import("compiler_rt/truncdfhf2.zig");
+ _ = @import("compiler_rt/truncdfsf2.zig");
+ _ = @import("compiler_rt/trunctfhf2.zig");
+ _ = @import("compiler_rt/trunctfsf2.zig");
+ _ = @import("compiler_rt/trunctfdf2.zig");
+ _ = @import("compiler_rt/trunctfxf2.zig");
+ _ = @import("compiler_rt/truncxfhf2.zig");
+ _ = @import("compiler_rt/truncxfsf2.zig");
+ _ = @import("compiler_rt/truncxfdf2.zig");
+
+ _ = @import("compiler_rt/divtf3.zig");
+ _ = @import("compiler_rt/divsf3.zig");
+ _ = @import("compiler_rt/divdf3.zig");
+ _ = @import("compiler_rt/divxf3.zig");
+ _ = @import("compiler_rt/sin.zig");
+ _ = @import("compiler_rt/cos.zig");
+ _ = @import("compiler_rt/sincos.zig");
+ _ = @import("compiler_rt/ceil.zig");
+ _ = @import("compiler_rt/exp.zig");
+ _ = @import("compiler_rt/exp2.zig");
+ _ = @import("compiler_rt/fabs.zig");
+ _ = @import("compiler_rt/floor.zig");
+ _ = @import("compiler_rt/fma.zig");
+ _ = @import("compiler_rt/fmax.zig");
+ _ = @import("compiler_rt/fmin.zig");
+ _ = @import("compiler_rt/fmod.zig");
+ _ = @import("compiler_rt/log.zig");
+ _ = @import("compiler_rt/log10.zig");
+ _ = @import("compiler_rt/log2.zig");
+ _ = @import("compiler_rt/round.zig");
+ _ = @import("compiler_rt/sqrt.zig");
+ _ = @import("compiler_rt/tan.zig");
+ _ = @import("compiler_rt/trunc.zig");
+ _ = @import("compiler_rt/stack_probe.zig");
+ _ = @import("compiler_rt/divti3.zig");
+ _ = @import("compiler_rt/modti3.zig");
+ _ = @import("compiler_rt/multi3.zig");
+ _ = @import("compiler_rt/udivti3.zig");
+ _ = @import("compiler_rt/udivmodti4.zig");
+ _ = @import("compiler_rt/umodti3.zig");
+
+ _ = @import("compiler_rt/int_to_float.zig");
+ _ = @import("compiler_rt/floatsihf.zig");
+ _ = @import("compiler_rt/floatsisf.zig");
+ _ = @import("compiler_rt/floatsidf.zig");
+ _ = @import("compiler_rt/floatsitf.zig");
+ _ = @import("compiler_rt/floatsixf.zig");
+ _ = @import("compiler_rt/floatdihf.zig");
+ _ = @import("compiler_rt/floatdisf.zig");
+ _ = @import("compiler_rt/floatdidf.zig");
+ _ = @import("compiler_rt/floatditf.zig");
+ _ = @import("compiler_rt/floatdixf.zig");
+ _ = @import("compiler_rt/floattihf.zig");
+ _ = @import("compiler_rt/floattisf.zig");
+ _ = @import("compiler_rt/floattidf.zig");
+ _ = @import("compiler_rt/floattitf.zig");
+ _ = @import("compiler_rt/floattixf.zig");
+ _ = @import("compiler_rt/floatundihf.zig");
+ _ = @import("compiler_rt/floatundisf.zig");
+ _ = @import("compiler_rt/floatundidf.zig");
+ _ = @import("compiler_rt/floatunditf.zig");
+ _ = @import("compiler_rt/floatundixf.zig");
+ _ = @import("compiler_rt/floatunsihf.zig");
+ _ = @import("compiler_rt/floatunsisf.zig");
+ _ = @import("compiler_rt/floatunsidf.zig");
+ _ = @import("compiler_rt/floatunsitf.zig");
+ _ = @import("compiler_rt/floatunsixf.zig");
+ _ = @import("compiler_rt/floatuntihf.zig");
+ _ = @import("compiler_rt/floatuntisf.zig");
+ _ = @import("compiler_rt/floatuntidf.zig");
+ _ = @import("compiler_rt/floatuntitf.zig");
+ _ = @import("compiler_rt/floatuntixf.zig");
+
+ _ = @import("compiler_rt/float_to_int.zig");
+ _ = @import("compiler_rt/fixhfsi.zig");
+ _ = @import("compiler_rt/fixhfdi.zig");
+ _ = @import("compiler_rt/fixhfti.zig");
+ _ = @import("compiler_rt/fixsfsi.zig");
+ _ = @import("compiler_rt/fixsfdi.zig");
+ _ = @import("compiler_rt/fixsfti.zig");
+ _ = @import("compiler_rt/fixdfsi.zig");
+ _ = @import("compiler_rt/fixdfdi.zig");
+ _ = @import("compiler_rt/fixdfti.zig");
+ _ = @import("compiler_rt/fixtfsi.zig");
+ _ = @import("compiler_rt/fixtfdi.zig");
+ _ = @import("compiler_rt/fixtfti.zig");
+ _ = @import("compiler_rt/fixxfsi.zig");
+ _ = @import("compiler_rt/fixxfdi.zig");
+ _ = @import("compiler_rt/fixxfti.zig");
+ _ = @import("compiler_rt/fixunshfsi.zig");
+ _ = @import("compiler_rt/fixunshfdi.zig");
+ _ = @import("compiler_rt/fixunshfti.zig");
+ _ = @import("compiler_rt/fixunssfsi.zig");
+ _ = @import("compiler_rt/fixunssfdi.zig");
+ _ = @import("compiler_rt/fixunssfti.zig");
+ _ = @import("compiler_rt/fixunsdfsi.zig");
+ _ = @import("compiler_rt/fixunsdfdi.zig");
+ _ = @import("compiler_rt/fixunsdfti.zig");
+ _ = @import("compiler_rt/fixunstfsi.zig");
+ _ = @import("compiler_rt/fixunstfdi.zig");
+ _ = @import("compiler_rt/fixunstfti.zig");
+ _ = @import("compiler_rt/fixunsxfsi.zig");
+ _ = @import("compiler_rt/fixunsxfdi.zig");
+ _ = @import("compiler_rt/fixunsxfti.zig");
+
+ _ = @import("compiler_rt/count0bits.zig");
+ _ = @import("compiler_rt/parity.zig");
+ _ = @import("compiler_rt/popcount.zig");
+ _ = @import("compiler_rt/bswap.zig");
+ _ = @import("compiler_rt/int.zig");
+ _ = @import("compiler_rt/shift.zig");
+
+ _ = @import("compiler_rt/negXi2.zig");
+
+ _ = @import("compiler_rt/muldi3.zig");
+
+ _ = @import("compiler_rt/absv.zig");
+ _ = @import("compiler_rt/absvsi2.zig");
+ _ = @import("compiler_rt/absvdi2.zig");
+ _ = @import("compiler_rt/absvti2.zig");
+
+ _ = @import("compiler_rt/negv.zig");
+ _ = @import("compiler_rt/addo.zig");
+ _ = @import("compiler_rt/subo.zig");
+ _ = @import("compiler_rt/mulo.zig");
+ _ = @import("compiler_rt/cmp.zig");
+
+ _ = @import("compiler_rt/negXf2.zig");
+
+ _ = @import("compiler_rt/os_version_check.zig");
+ _ = @import("compiler_rt/emutls.zig");
+ _ = @import("compiler_rt/arm.zig");
+ _ = @import("compiler_rt/aulldiv.zig");
+ _ = @import("compiler_rt/aullrem.zig");
+ _ = @import("compiler_rt/clear_cache.zig");
}
diff --git a/lib/compiler_rt/absv.zig b/lib/compiler_rt/absv.zig
index f14497daf2..8910a4a6b9 100644
--- a/lib/compiler_rt/absv.zig
+++ b/lib/compiler_rt/absv.zig
@@ -1,8 +1,6 @@
-// absv - absolute oVerflow
-// * @panic, if value can not be represented
-// - absvXi4_generic for unoptimized version
-
-inline fn absvXi(comptime ST: type, a: ST) ST {
+/// absv - absolute oVerflow
+/// * @panic if the result cannot be represented
+pub inline fn absv(comptime ST: type, a: ST) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
@@ -21,18 +19,6 @@ inline fn absvXi(comptime ST: type, a: ST) ST {
return x;
}
-pub fn __absvsi2(a: i32) callconv(.C) i32 {
- return absvXi(i32, a);
-}
-
-pub fn __absvdi2(a: i64) callconv(.C) i64 {
- return absvXi(i64, a);
-}
-
-pub fn __absvti2(a: i128) callconv(.C) i128 {
- return absvXi(i128, a);
-}
-
test {
_ = @import("absvsi2_test.zig");
_ = @import("absvdi2_test.zig");
diff --git a/lib/compiler_rt/absvdi2.zig b/lib/compiler_rt/absvdi2.zig
new file mode 100644
index 0000000000..7ebf561ae5
--- /dev/null
+++ b/lib/compiler_rt/absvdi2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage });
+}
+
+pub fn __absvdi2(a: i64) callconv(.C) i64 {
+ return absv(i64, a);
+}
diff --git a/lib/compiler_rt/absvdi2_test.zig b/lib/compiler_rt/absvdi2_test.zig
index 4aa73513ee..e861ef0ff3 100644
--- a/lib/compiler_rt/absvdi2_test.zig
+++ b/lib/compiler_rt/absvdi2_test.zig
@@ -1,8 +1,9 @@
-const absv = @import("absv.zig");
const testing = @import("std").testing;
+const __absvdi2 = @import("absvdi2.zig").__absvdi2;
+
fn test__absvdi2(a: i64, expected: i64) !void {
- var result = absv.__absvdi2(a);
+ var result = __absvdi2(a);
try testing.expectEqual(expected, result);
}
diff --git a/lib/compiler_rt/absvsi2.zig b/lib/compiler_rt/absvsi2.zig
new file mode 100644
index 0000000000..664925f8f9
--- /dev/null
+++ b/lib/compiler_rt/absvsi2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage });
+}
+
+pub fn __absvsi2(a: i32) callconv(.C) i32 {
+ return absv(i32, a);
+}
diff --git a/lib/compiler_rt/absvsi2_test.zig b/lib/compiler_rt/absvsi2_test.zig
index 2cc3dbf991..9c74ebee67 100644
--- a/lib/compiler_rt/absvsi2_test.zig
+++ b/lib/compiler_rt/absvsi2_test.zig
@@ -1,8 +1,9 @@
-const absv = @import("absv.zig");
const testing = @import("std").testing;
+const __absvsi2 = @import("absvsi2.zig").__absvsi2;
+
fn test__absvsi2(a: i32, expected: i32) !void {
- var result = absv.__absvsi2(a);
+ var result = __absvsi2(a);
try testing.expectEqual(expected, result);
}
diff --git a/lib/compiler_rt/absvti2.zig b/lib/compiler_rt/absvti2.zig
new file mode 100644
index 0000000000..f7d0f796b0
--- /dev/null
+++ b/lib/compiler_rt/absvti2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvti2, .{ .name = "__absvti2", .linkage = common.linkage });
+}
+
+pub fn __absvti2(a: i128) callconv(.C) i128 {
+ return absv(i128, a);
+}
diff --git a/lib/compiler_rt/absvti2_test.zig b/lib/compiler_rt/absvti2_test.zig
index 5b4deb3640..fbed961775 100644
--- a/lib/compiler_rt/absvti2_test.zig
+++ b/lib/compiler_rt/absvti2_test.zig
@@ -1,8 +1,9 @@
-const absv = @import("absv.zig");
const testing = @import("std").testing;
+const __absvti2 = @import("absvti2.zig").__absvti2;
+
fn test__absvti2(a: i128, expected: i128) !void {
- var result = absv.__absvti2(a);
+ var result = __absvti2(a);
try testing.expectEqual(expected, result);
}
diff --git a/lib/compiler_rt/adddf3.zig b/lib/compiler_rt/adddf3.zig
new file mode 100644
index 0000000000..1b511f78a4
--- /dev/null
+++ b/lib/compiler_rt/adddf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = common.linkage });
+ } else {
+ @export(__adddf3, .{ .name = "__adddf3", .linkage = common.linkage });
+ }
+}
+
+fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
+ return addf3(f64, a, b);
+}
+
+fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return addf3(f64, a, b);
+}
diff --git a/lib/compiler_rt/addXf3.zig b/lib/compiler_rt/addf3.zig
index 1a9de0fb74..7f2e368121 100644
--- a/lib/compiler_rt/addXf3.zig
+++ b/lib/compiler_rt/addf3.zig
@@ -1,84 +1,12 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
-
const std = @import("std");
const math = std.math;
-const builtin = @import("builtin");
-const compiler_rt = @import("../compiler_rt.zig");
-
-pub fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
- return addXf3(f32, a, b);
-}
-
-pub fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
- return addXf3(f64, a, b);
-}
-
-pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
- return addXf3(f80, a, b);
-}
-
-pub fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
- var b_rep = std.math.break_f80(b);
- b_rep.exp ^= 0x8000;
- return __addxf3(a, std.math.make_f80(b_rep));
-}
-
-pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
- return addXf3(f128, a, b);
-}
-
-pub fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
- const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
- return addXf3(f32, a, neg_b);
-}
-
-pub fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
- const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
- return addXf3(f64, a, neg_b);
-}
-
-pub fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
- const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
- return addXf3(f128, a, neg_b);
-}
-
-pub fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __addsf3, .{ a, b });
-}
-
-pub fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __adddf3, .{ a, b });
-}
-
-pub fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __subsf3, .{ a, b });
-}
-
-pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __subdf3, .{ a, b });
-}
-
-// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
- const bits = @typeInfo(T).Float.bits;
- const Z = std.meta.Int(.unsigned, bits);
- const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
- const fractionalBits = math.floatFractionalBits(T);
- const integerBit = @as(Z, 1) << fractionalBits;
-
- const shift = @clz(std.meta.Int(.unsigned, bits), significand.*) - @clz(Z, integerBit);
- significand.* <<= @intCast(S, shift);
- return @as(i32, 1) - shift;
-}
+const common = @import("./common.zig");
+const normalize = common.normalize;
-// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-fn addXf3(comptime T: type, a: T, b: T) T {
+/// Ported from:
+///
+/// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
+pub inline fn addf3(comptime T: type, a: T, b: T) T {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, bits);
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
@@ -240,5 +168,5 @@ fn addXf3(comptime T: type, a: T, b: T) T {
}
test {
- _ = @import("addXf3_test.zig");
+ _ = @import("addf3_test.zig");
}
diff --git a/lib/compiler_rt/addXf3_test.zig b/lib/compiler_rt/addf3_test.zig
index f38c9d6018..1df87a889f 100644
--- a/lib/compiler_rt/addXf3_test.zig
+++ b/lib/compiler_rt/addf3_test.zig
@@ -7,7 +7,9 @@ const std = @import("std");
const math = std.math;
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
-const __addtf3 = @import("addXf3.zig").__addtf3;
+const __addtf3 = @import("addtf3.zig").__addtf3;
+const __addxf3 = @import("addxf3.zig").__addxf3;
+const __subtf3 = @import("subtf3.zig").__subtf3;
fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
const x = __addtf3(a, b);
@@ -48,8 +50,6 @@ test "addtf3" {
try test__addtf3(0x1.edcba52449872455634654321fp-1, 0x1.23456734245345543849abcdefp+5, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
}
-const __subtf3 = @import("addXf3.zig").__subtf3;
-
fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
const x = __subtf3(a, b);
@@ -87,7 +87,6 @@ test "subtf3" {
try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c);
}
-const __addxf3 = @import("addXf3.zig").__addxf3;
const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
fn test__addxf3(a: f80, b: f80, expected: u80) !void {
diff --git a/lib/compiler_rt/addo.zig b/lib/compiler_rt/addo.zig
index 91ed15747c..d14fe36710 100644
--- a/lib/compiler_rt/addo.zig
+++ b/lib/compiler_rt/addo.zig
@@ -1,4 +1,14 @@
+const std = @import("std");
const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const panic = @import("common.zig").panic;
+
+comptime {
+ @export(__addosi4, .{ .name = "__addosi4", .linkage = linkage });
+ @export(__addodi4, .{ .name = "__addodi4", .linkage = linkage });
+ @export(__addoti4, .{ .name = "__addoti4", .linkage = linkage });
+}
// addo - add overflow
// * return a+%b.
diff --git a/lib/compiler_rt/addsf3.zig b/lib/compiler_rt/addsf3.zig
new file mode 100644
index 0000000000..83f8285371
--- /dev/null
+++ b/lib/compiler_rt/addsf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = common.linkage });
+ } else {
+ @export(__addsf3, .{ .name = "__addsf3", .linkage = common.linkage });
+ }
+}
+
+fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
+ return addf3(f32, a, b);
+}
+
+fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
+ return addf3(f32, a, b);
+}
diff --git a/lib/compiler_rt/addtf3.zig b/lib/compiler_rt/addtf3.zig
new file mode 100644
index 0000000000..2a22493ded
--- /dev/null
+++ b/lib/compiler_rt/addtf3.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__addkf3, .{ .name = "__addkf3", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_add, .{ .name = "_Qp_add", .linkage = common.linkage });
+ } else {
+ @export(__addtf3, .{ .name = "__addtf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
+ return addf3(f128, a, b);
+}
+
+fn __addkf3(a: f128, b: f128) callconv(.C) f128 {
+ return addf3(f128, a, b);
+}
+
+fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.C) void {
+ c.* = addf3(f128, a.*, b.*);
+}
diff --git a/lib/compiler_rt/addxf3.zig b/lib/compiler_rt/addxf3.zig
new file mode 100644
index 0000000000..72cf955632
--- /dev/null
+++ b/lib/compiler_rt/addxf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__addxf3, .{ .name = "__addxf3", .linkage = common.linkage });
+}
+
+pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
+ return addf3(f80, a, b);
+}
diff --git a/lib/compiler_rt/arm.zig b/lib/compiler_rt/arm.zig
index f30d2fd6ec..145d3992f7 100644
--- a/lib/compiler_rt/arm.zig
+++ b/lib/compiler_rt/arm.zig
@@ -1,5 +1,46 @@
// ARM specific builtins
+const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (!builtin.is_test) {
+ if (arch.isARM() or arch.isThumb()) {
+ @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = common.linkage });
+ @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = common.linkage });
+ @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = common.linkage });
+
+ @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = common.linkage });
+ @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = common.linkage });
+
+ @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = common.linkage });
+ @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = common.linkage });
+
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = common.linkage });
+ @export(__aeabi_memcpy4, .{ .name = "__aeabi_memcpy4", .linkage = common.linkage });
+ @export(__aeabi_memcpy8, .{ .name = "__aeabi_memcpy8", .linkage = common.linkage });
+
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = common.linkage });
+ @export(__aeabi_memmove4, .{ .name = "__aeabi_memmove4", .linkage = common.linkage });
+ @export(__aeabi_memmove8, .{ .name = "__aeabi_memmove8", .linkage = common.linkage });
+
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = common.linkage });
+ @export(__aeabi_memset4, .{ .name = "__aeabi_memset4", .linkage = common.linkage });
+ @export(__aeabi_memset8, .{ .name = "__aeabi_memset8", .linkage = common.linkage });
+
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = common.linkage });
+ @export(__aeabi_memclr4, .{ .name = "__aeabi_memclr4", .linkage = common.linkage });
+ @export(__aeabi_memclr8, .{ .name = "__aeabi_memclr8", .linkage = common.linkage });
+
+ if (builtin.os.tag == .linux) {
+ @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = common.linkage });
+ }
+ }
+ }
+}
const __divmodsi4 = @import("int.zig").__divmodsi4;
const __udivmodsi4 = @import("int.zig").__udivmodsi4;
@@ -14,11 +55,27 @@ pub fn __aeabi_memcpy(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memcpy(dest, src, n);
}
+pub fn __aeabi_memcpy4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memcpy(dest, src, n);
+}
+pub fn __aeabi_memcpy8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memcpy(dest, src, n);
+}
pub fn __aeabi_memmove(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memmove(dest, src, n);
}
+pub fn __aeabi_memmove4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memmove(dest, src, n);
+}
+pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memmove(dest, src, n);
+}
pub fn __aeabi_memset(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
@setRuntimeSafety(false);
@@ -26,16 +83,32 @@ pub fn __aeabi_memset(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
// two arguments swapped
_ = memset(dest, c, n);
}
+pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, c, n);
+}
+pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, c, n);
+}
pub fn __aeabi_memclr(dest: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memset(dest, 0, n);
}
+pub fn __aeabi_memclr4(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, 0, n);
+}
+pub fn __aeabi_memclr8(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, 0, n);
+}
// Dummy functions to avoid errors during the linking phase
-pub fn __aeabi_unwind_cpp_pr0() callconv(.C) void {}
-pub fn __aeabi_unwind_cpp_pr1() callconv(.C) void {}
-pub fn __aeabi_unwind_cpp_pr2() callconv(.C) void {}
+pub fn __aeabi_unwind_cpp_pr0() callconv(.AAPCS) void {}
+pub fn __aeabi_unwind_cpp_pr1() callconv(.AAPCS) void {}
+pub fn __aeabi_unwind_cpp_pr2() callconv(.AAPCS) void {}
// This function can only clobber r0 according to the ABI
pub fn __aeabi_read_tp() callconv(.Naked) void {
diff --git a/lib/compiler_rt/atomics.zig b/lib/compiler_rt/atomics.zig
index 20545d0791..6935a858aa 100644
--- a/lib/compiler_rt/atomics.zig
+++ b/lib/compiler_rt/atomics.zig
@@ -2,8 +2,8 @@ const std = @import("std");
const builtin = @import("builtin");
const cpu = builtin.cpu;
const arch = cpu.arch;
-
const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const panic = @import("common.zig").panic;
// This parameter is true iff the target architecture supports the bare minimum
// to implement the atomic load/store intrinsics.
diff --git a/lib/compiler_rt/aulldiv.zig b/lib/compiler_rt/aulldiv.zig
index 7709e17e63..d9517c6d10 100644
--- a/lib/compiler_rt/aulldiv.zig
+++ b/lib/compiler_rt/aulldiv.zig
@@ -1,7 +1,20 @@
+const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const abi = builtin.abi;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (arch == .i386 and abi == .msvc) {
+ // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
+ @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = common.linkage });
+ @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = common.linkage });
+ }
+}
pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
- @setRuntimeSafety(builtin.is_test);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
diff --git a/lib/compiler_rt/aullrem.zig b/lib/compiler_rt/aullrem.zig
index dbd52cd377..43821eb9d3 100644
--- a/lib/compiler_rt/aullrem.zig
+++ b/lib/compiler_rt/aullrem.zig
@@ -1,7 +1,20 @@
+const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const abi = builtin.abi;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (arch == .i386 and abi == .msvc) {
+ // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
+ @export(_allrem, .{ .name = "\x01__allrem", .linkage = common.linkage });
+ @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = common.linkage });
+ }
+}
pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
- @setRuntimeSafety(builtin.is_test);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
diff --git a/lib/compiler_rt/bswap.zig b/lib/compiler_rt/bswap.zig
index f1d2138811..9f7d2cb879 100644
--- a/lib/compiler_rt/bswap.zig
+++ b/lib/compiler_rt/bswap.zig
@@ -1,5 +1,14 @@
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = common.linkage });
+ @export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = common.linkage });
+ @export(__bswapti2, .{ .name = "__bswapti2", .linkage = common.linkage });
+}
// bswap - byteswap
// - bswapXi2 for unoptimized big and little endian
@@ -12,7 +21,6 @@ const builtin = @import("builtin");
// 00 00 00 ff << 3*8 (rightmost byte)
inline fn bswapXi2(comptime T: type, a: T) T {
- @setRuntimeSafety(builtin.is_test);
switch (@bitSizeOf(T)) {
32 => {
// zig fmt: off
diff --git a/lib/compiler_rt/ceil.zig b/lib/compiler_rt/ceil.zig
index 06020ea8f8..406f61fbb9 100644
--- a/lib/compiler_rt/ceil.zig
+++ b/lib/compiler_rt/ceil.zig
@@ -1,12 +1,27 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/ceilf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/ceil.c
+//! Ported from musl, which is MIT licensed.
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceilf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceil.c
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__ceilh, .{ .name = "__ceilh", .linkage = common.linkage });
+ @export(ceilf, .{ .name = "ceilf", .linkage = common.linkage });
+ @export(ceil, .{ .name = "ceil", .linkage = common.linkage });
+ @export(__ceilx, .{ .name = "__ceilx", .linkage = common.linkage });
+ const ceilq_sym_name = if (common.want_ppc_abi) "ceilf128" else "ceilq";
+ @export(ceilq, .{ .name = ceilq_sym_name, .linkage = common.linkage });
+ @export(ceill, .{ .name = "ceill", .linkage = common.linkage });
+}
pub fn __ceilh(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/clear_cache.zig b/lib/compiler_rt/clear_cache.zig
index 0765a23811..b21606814c 100644
--- a/lib/compiler_rt/clear_cache.zig
+++ b/lib/compiler_rt/clear_cache.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const os = builtin.os.tag;
+pub const panic = @import("common.zig").panic;
// Ported from llvm-project d32170dbd5b0d54436537b6b75beaf44324e0c28
@@ -10,7 +11,13 @@ const os = builtin.os.tag;
// It is expected to invalidate the instruction cache for the
// specified range.
-pub fn clear_cache(start: usize, end: usize) callconv(.C) void {
+comptime {
+ if (builtin.zig_backend != .stage2_llvm) {
+ _ = clear_cache;
+ }
+}
+
+fn clear_cache(start: usize, end: usize) callconv(.C) void {
const x86 = switch (arch) {
.i386, .x86_64 => true,
else => false,
diff --git a/lib/compiler_rt/cmp.zig b/lib/compiler_rt/cmp.zig
index 9eb4227527..8ff2c38cd4 100644
--- a/lib/compiler_rt/cmp.zig
+++ b/lib/compiler_rt/cmp.zig
@@ -1,5 +1,18 @@
const std = @import("std");
const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = common.linkage });
+ @export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = common.linkage });
+ @export(__cmpti2, .{ .name = "__cmpti2", .linkage = common.linkage });
+ @export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = common.linkage });
+ @export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = common.linkage });
+ @export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = common.linkage });
+}
// cmp - signed compare
// - cmpXi2_generic for unoptimized little and big endian
@@ -12,7 +25,6 @@ const builtin = @import("builtin");
// a > b => 2
inline fn XcmpXi2(comptime T: type, a: T, b: T) i32 {
- @setRuntimeSafety(builtin.is_test);
var cmp1: i32 = 0;
var cmp2: i32 = 0;
if (a > b)
diff --git a/lib/compiler_rt/cmpdf2.zig b/lib/compiler_rt/cmpdf2.zig
new file mode 100644
index 0000000000..67dbcd8b4d
--- /dev/null
+++ b/lib/compiler_rt/cmpdf2.zig
@@ -0,0 +1,68 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = common.linkage });
+ @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = common.linkage });
+ @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = common.linkage });
+ } else {
+ @export(__eqdf2, .{ .name = "__eqdf2", .linkage = common.linkage });
+ @export(__nedf2, .{ .name = "__nedf2", .linkage = common.linkage });
+ @export(__ledf2, .{ .name = "__ledf2", .linkage = common.linkage });
+ @export(__cmpdf2, .{ .name = "__cmpdf2", .linkage = common.linkage });
+ @export(__ltdf2, .{ .name = "__ltdf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__ledf2`, `__eqdf2`, `__nedf2`, `__cmpdf2`,
+/// and `__ltdf2`.
+fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f64, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
+/// to have the same return value.
+pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
+/// to have the same return value.
+pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
+}
+
+fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
+}
+
+fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
+}
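
A caller-side view of the convention documented above, written as an illustrative test that is not part of this commit: a compiler typically lowers `a < b` to `__ltdf2(a, b) < 0` and `a == b` to `__eqdf2(a, b) == 0`, so the NaN-returns-1 rule makes both come out false for unordered operands.

const std = @import("std");
const cmpdf2 = @import("cmpdf2.zig");

test "unordered operands compare as false" {
    const nan = std.math.nan(f64);
    // `a < b` lowers roughly to `__ltdf2(a, b) < 0`; NaN makes the routine return 1.
    try std.testing.expect(!(cmpdf2.__ltdf2(nan, 1.0) < 0));
    // `a == b` lowers roughly to `__eqdf2(a, b) == 0`; same reasoning applies.
    try std.testing.expect(!(cmpdf2.__eqdf2(nan, nan) == 0));
}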
diff --git a/lib/compiler_rt/cmpsf2.zig b/lib/compiler_rt/cmpsf2.zig
new file mode 100644
index 0000000000..1ac40ef6e2
--- /dev/null
+++ b/lib/compiler_rt/cmpsf2.zig
@@ -0,0 +1,68 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = common.linkage });
+ @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = common.linkage });
+ @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = common.linkage });
+ } else {
+ @export(__eqsf2, .{ .name = "__eqsf2", .linkage = common.linkage });
+ @export(__nesf2, .{ .name = "__nesf2", .linkage = common.linkage });
+ @export(__lesf2, .{ .name = "__lesf2", .linkage = common.linkage });
+ @export(__cmpsf2, .{ .name = "__cmpsf2", .linkage = common.linkage });
+ @export(__ltsf2, .{ .name = "__ltsf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__lesf2`, `__eqsf2`, `__nesf2`, `__cmpsf2`,
+/// and `__ltsf2`.
+fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f32, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
+/// to have the same return value.
+pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
+/// to have the same return value.
+pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
+}
+
+fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
+}
+
+fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
+}
diff --git a/lib/compiler_rt/cmptf2.zig b/lib/compiler_rt/cmptf2.zig
new file mode 100644
index 0000000000..00263f943a
--- /dev/null
+++ b/lib/compiler_rt/cmptf2.zig
@@ -0,0 +1,122 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__eqkf2, .{ .name = "__eqkf2", .linkage = common.linkage });
+ @export(__nekf2, .{ .name = "__nekf2", .linkage = common.linkage });
+ @export(__ltkf2, .{ .name = "__ltkf2", .linkage = common.linkage });
+ @export(__lekf2, .{ .name = "__lekf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = common.linkage });
+ @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = common.linkage });
+ @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = common.linkage });
+ @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = common.linkage });
+ @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = common.linkage });
+ @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = common.linkage });
+ @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = common.linkage });
+ } else {
+ @export(__eqtf2, .{ .name = "__eqtf2", .linkage = common.linkage });
+ @export(__netf2, .{ .name = "__netf2", .linkage = common.linkage });
+ @export(__letf2, .{ .name = "__letf2", .linkage = common.linkage });
+ @export(__cmptf2, .{ .name = "__cmptf2", .linkage = common.linkage });
+ @export(__lttf2, .{ .name = "__lttf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__letf2`, `__eqtf2`, `__netf2`, `__cmptf2`,
+/// and `__lttf2`.
+fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __letf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
+/// to have the same return value.
+fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
+/// to have the same return value.
+fn __netf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __eqkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __nekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __ltkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __lekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+const SparcFCMP = enum(i32) {
+ Equal = 0,
+ Less = 1,
+ Greater = 2,
+ Unordered = 3,
+};
+
+fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, SparcFCMP, a.*, b.*));
+}
+
+fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Equal;
+}
+
+fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) != .Equal;
+}
+
+fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Less;
+}
+
+fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Greater;
+}
+
+fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool {
+ return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
+ .Equal, .Greater => true,
+ .Less, .Unordered => false,
+ };
+}
+
+fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool {
+ return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
+ .Equal, .Less => true,
+ .Greater, .Unordered => false,
+ };
+}
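
The SPARC predicates above all reduce to reinterpreting one _Qp_cmp result. A self-contained sketch of that encoding (illustrative only; the real enum is private to this file):

const std = @import("std");

// The encoding _Qp_cmp uses: 0 = equal, 1 = less, 2 = greater, 3 = unordered.
const Cmp = enum(i32) { Equal = 0, Less = 1, Greater = 2, Unordered = 3 };

fn fge(r: Cmp) bool {
    return switch (r) {
        .Equal, .Greater => true,
        .Less, .Unordered => false, // unordered fails every ordered predicate
    };
}

test "fge rejects unordered" {
    try std.testing.expect(fge(.Greater));
    try std.testing.expect(!fge(.Unordered));
}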
diff --git a/lib/compiler_rt/cmpxf2.zig b/lib/compiler_rt/cmpxf2.zig
new file mode 100644
index 0000000000..7286316f99
--- /dev/null
+++ b/lib/compiler_rt/cmpxf2.zig
@@ -0,0 +1,50 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__eqxf2, .{ .name = "__eqxf2", .linkage = common.linkage });
+ @export(__nexf2, .{ .name = "__nexf2", .linkage = common.linkage });
+ @export(__lexf2, .{ .name = "__lexf2", .linkage = common.linkage });
+ @export(__cmpxf2, .{ .name = "__cmpxf2", .linkage = common.linkage });
+ @export(__ltxf2, .{ .name = "__ltxf2", .linkage = common.linkage });
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__lexf2`, `__eqxf2`, `__nexf2`, `__cmpxf2`,
+/// and `__ltxf2`.
+fn __cmpxf2(a: f80, b: f80) callconv(.C) i32 {
+ return @enumToInt(comparef.cmp_f80(comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
+/// to have the same return value.
+fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
+/// to have the same return value.
+fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig
new file mode 100644
index 0000000000..b6e4a5d311
--- /dev/null
+++ b/lib/compiler_rt/common.zig
@@ -0,0 +1,190 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const want_aeabi = switch (builtin.abi) {
+ .eabi,
+ .eabihf,
+ .musleabi,
+ .musleabihf,
+ .gnueabi,
+ .gnueabihf,
+ => switch (builtin.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => true,
+ else => false,
+ },
+ else => false,
+};
+pub const want_ppc_abi = builtin.cpu.arch.isPPC() or builtin.cpu.arch.isPPC64();
+
+/// This governs whether to use these symbol names for f16/f32 conversions
+/// rather than the standard names:
+/// * __gnu_f2h_ieee
+/// * __gnu_h2f_ieee
+/// Known correct configurations:
+/// x86_64-freestanding-none => true
+/// x86_64-linux-none => true
+/// x86_64-linux-gnu => true
+/// x86_64-linux-musl => true
+/// x86_64-linux-eabi => true
+/// arm-linux-musleabihf => true
+/// arm-linux-gnueabihf => true
+/// arm-linux-eabihf => false
+/// wasm32-wasi-musl => false
+/// wasm32-freestanding-none => false
+/// x86_64-windows-gnu => true
+/// x86_64-windows-msvc => true
+/// any-macos-any => false
+pub const gnu_f16_abi = switch (builtin.cpu.arch) {
+ .wasm32, .wasm64 => false,
+
+ .arm, .armeb, .thumb, .thumbeb => switch (builtin.abi) {
+ .eabi, .eabihf => false,
+ else => true,
+ },
+
+ else => !builtin.os.tag.isDarwin(),
+};
+
+pub const want_sparc_abi = builtin.cpu.arch.isSPARC();
+
+// Avoid dragging the runtime safety mechanisms into this .o file,
+// unless we're trying to test compiler-rt.
+pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
+ _ = error_return_trace;
+ if (builtin.is_test) {
+ @setCold(true);
+ std.debug.panic("{s}", .{msg});
+ } else {
+ unreachable;
+ }
+}
+
+/// AArch64 is currently the only ABI that supports f16 arguments without
+/// extending them to wider floating-point types.
+/// TODO remove this; do this type selection in the language rather than
+/// here in compiler-rt.
+pub const F16T = if (builtin.cpu.arch.isAARCH64()) f16 else u16;
+
+pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
+ switch (Z) {
+ u16 => {
+ // 16x16 --> 32 bit multiply
+ const product = @as(u32, a) * @as(u32, b);
+ hi.* = @intCast(u16, product >> 16);
+ lo.* = @truncate(u16, product);
+ },
+ u32 => {
+ // 32x32 --> 64 bit multiply
+ const product = @as(u64, a) * @as(u64, b);
+ hi.* = @truncate(u32, product >> 32);
+ lo.* = @truncate(u32, product);
+ },
+ u64 => {
+ const S = struct {
+ fn loWord(x: u64) u64 {
+ return @truncate(u32, x);
+ }
+ fn hiWord(x: u64) u64 {
+ return @truncate(u32, x >> 32);
+ }
+ };
+ // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
+ // many 64-bit platforms have this operation, but they tend to have hardware
+ // floating-point, so we don't bother with a special case for them here.
+ // Each of the component 32x32 -> 64 products
+ const plolo: u64 = S.loWord(a) * S.loWord(b);
+ const plohi: u64 = S.loWord(a) * S.hiWord(b);
+ const philo: u64 = S.hiWord(a) * S.loWord(b);
+ const phihi: u64 = S.hiWord(a) * S.hiWord(b);
+ // Sum terms that contribute to lo in a way that allows us to get the carry
+ const r0: u64 = S.loWord(plolo);
+ const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
+ lo.* = r0 +% (r1 << 32);
+ // Sum terms contributing to hi with the carry from lo
+ hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
+ },
+ u128 => {
+ const Word_LoMask = @as(u64, 0x00000000ffffffff);
+ const Word_HiMask = @as(u64, 0xffffffff00000000);
+ const Word_FullMask = @as(u64, 0xffffffffffffffff);
+ const S = struct {
+ fn Word_1(x: u128) u64 {
+ return @truncate(u32, x >> 96);
+ }
+ fn Word_2(x: u128) u64 {
+ return @truncate(u32, x >> 64);
+ }
+ fn Word_3(x: u128) u64 {
+ return @truncate(u32, x >> 32);
+ }
+ fn Word_4(x: u128) u64 {
+ return @truncate(u32, x);
+ }
+ };
+ // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
+ // many 64-bit platforms have this operation, but they tend to have hardware
+ // floating-point, so we don't bother with a special case for them here.
+
+ const product11: u64 = S.Word_1(a) * S.Word_1(b);
+ const product12: u64 = S.Word_1(a) * S.Word_2(b);
+ const product13: u64 = S.Word_1(a) * S.Word_3(b);
+ const product14: u64 = S.Word_1(a) * S.Word_4(b);
+ const product21: u64 = S.Word_2(a) * S.Word_1(b);
+ const product22: u64 = S.Word_2(a) * S.Word_2(b);
+ const product23: u64 = S.Word_2(a) * S.Word_3(b);
+ const product24: u64 = S.Word_2(a) * S.Word_4(b);
+ const product31: u64 = S.Word_3(a) * S.Word_1(b);
+ const product32: u64 = S.Word_3(a) * S.Word_2(b);
+ const product33: u64 = S.Word_3(a) * S.Word_3(b);
+ const product34: u64 = S.Word_3(a) * S.Word_4(b);
+ const product41: u64 = S.Word_4(a) * S.Word_1(b);
+ const product42: u64 = S.Word_4(a) * S.Word_2(b);
+ const product43: u64 = S.Word_4(a) * S.Word_3(b);
+ const product44: u64 = S.Word_4(a) * S.Word_4(b);
+
+ const sum0: u128 = @as(u128, product44);
+ const sum1: u128 = @as(u128, product34) +%
+ @as(u128, product43);
+ const sum2: u128 = @as(u128, product24) +%
+ @as(u128, product33) +%
+ @as(u128, product42);
+ const sum3: u128 = @as(u128, product14) +%
+ @as(u128, product23) +%
+ @as(u128, product32) +%
+ @as(u128, product41);
+ const sum4: u128 = @as(u128, product13) +%
+ @as(u128, product22) +%
+ @as(u128, product31);
+ const sum5: u128 = @as(u128, product12) +%
+ @as(u128, product21);
+ const sum6: u128 = @as(u128, product11);
+
+ const r0: u128 = (sum0 & Word_FullMask) +%
+ ((sum1 & Word_LoMask) << 32);
+ const r1: u128 = (sum0 >> 64) +%
+ ((sum1 >> 32) & Word_FullMask) +%
+ (sum2 & Word_FullMask) +%
+ ((sum3 << 32) & Word_HiMask);
+
+ lo.* = r0 +% (r1 << 64);
+ hi.* = (r1 >> 64) +%
+ (sum1 >> 96) +%
+ (sum2 >> 64) +%
+ (sum3 >> 32) +%
+ sum4 +%
+ (sum5 << 32) +%
+ (sum6 << 64);
+ },
+ else => @compileError("unsupported"),
+ }
+}
+
+pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
+ const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+ const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
+
+ const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
+ significand.* <<= @intCast(std.math.Log2Int(Z), shift);
+ return @as(i32, 1) - shift;
+}
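
The 64-bit branch of wideMultiply above splits each operand into 32-bit halves and reassembles the 128-bit product from four partial products. A minimal sketch, assuming a sibling test file that can import common.zig, of checking that branch against Zig's native widening multiply:

const std = @import("std");
const common = @import("common.zig");

test "wideMultiply u64 path agrees with a native u128 multiply" {
    const a: u64 = 0xDEADBEEFCAFEBABE;
    const b: u64 = 0x0123456789ABCDEF;
    var hi: u64 = undefined;
    var lo: u64 = undefined;
    common.wideMultiply(u64, a, b, &hi, &lo);
    const wide = @as(u128, a) * @as(u128, b);
    // hi:lo must be exactly the upper and lower 64 bits of the 128-bit product.
    try std.testing.expectEqual(@truncate(u64, wide >> 64), hi);
    try std.testing.expectEqual(@truncate(u64, wide), lo);
}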
diff --git a/lib/compiler_rt/compareXf2.zig b/lib/compiler_rt/compareXf2.zig
deleted file mode 100644
index 9640298f8f..0000000000
--- a/lib/compiler_rt/compareXf2.zig
+++ /dev/null
@@ -1,328 +0,0 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/comparesf2.c
-
-const std = @import("std");
-const builtin = @import("builtin");
-
-const LE = enum(i32) {
- Less = -1,
- Equal = 0,
- Greater = 1,
-
- const Unordered: LE = .Greater;
-};
-
-const GE = enum(i32) {
- Less = -1,
- Equal = 0,
- Greater = 1,
-
- const Unordered: GE = .Less;
-};
-
-pub inline fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
- @setRuntimeSafety(builtin.is_test);
-
- const bits = @typeInfo(T).Float.bits;
- const srep_t = std.meta.Int(.signed, bits);
- const rep_t = std.meta.Int(.unsigned, bits);
-
- const significandBits = std.math.floatMantissaBits(T);
- const exponentBits = std.math.floatExponentBits(T);
- const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
- const absMask = signBit - 1;
- const infT = comptime std.math.inf(T);
- const infRep = @bitCast(rep_t, infT);
-
- const aInt = @bitCast(srep_t, a);
- const bInt = @bitCast(srep_t, b);
- const aAbs = @bitCast(rep_t, aInt) & absMask;
- const bAbs = @bitCast(rep_t, bInt) & absMask;
-
- // If either a or b is NaN, they are unordered.
- if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
-
- // If a and b are both zeros, they are equal.
- if ((aAbs | bAbs) == 0) return .Equal;
-
- // If at least one of a and b is positive, we get the same result comparing
- // a and b as signed integers as we would with a floating-point compare.
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt) {
- return .Less;
- } else if (aInt == bInt) {
- return .Equal;
- } else return .Greater;
- } else {
- // Otherwise, both are negative, so we need to flip the sense of the
- // comparison to get the correct result. (This assumes a twos- or ones-
- // complement integer representation; if integers are represented in a
- // sign-magnitude representation, then this flip is incorrect).
- if (aInt > bInt) {
- return .Less;
- } else if (aInt == bInt) {
- return .Equal;
- } else return .Greater;
- }
-}
-
-pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
- const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
-
- const significandBits = std.math.floatMantissaBits(T);
- const exponentBits = std.math.floatExponentBits(T);
- const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
- const absMask = signBit - 1;
- const infRep = @bitCast(rep_t, std.math.inf(T));
-
- const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
- const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
-
- return @boolToInt(aAbs > infRep or bAbs > infRep);
-}
-
-// Comparison between f32
-
-pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f32, LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f32, GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
- return __lesf2(a, b);
-}
-
-pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
- return __lesf2(a, b);
-}
-
-pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
- return __lesf2(a, b);
-}
-
-pub fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
- return __gesf2(a, b);
-}
-
-// Comparison between f64
-
-pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f64, LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f64, GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
- return __ledf2(a, b);
-}
-
-pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
- return __ledf2(a, b);
-}
-
-pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
- return __ledf2(a, b);
-}
-
-pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
- return __gedf2(a, b);
-}
-
-// Comparison between f80
-
-pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
- const a_rep = std.math.break_f80(a);
- const b_rep = std.math.break_f80(b);
- const sig_bits = std.math.floatMantissaBits(f80);
- const int_bit = 0x8000000000000000;
- const sign_bit = 0x8000;
- const special_exp = 0x7FFF;
-
- // If either a or b is NaN, they are unordered.
- if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
- (b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
- return RT.Unordered;
-
- // If a and b are both zeros, they are equal.
- if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
- return .Equal;
-
- if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
- return .Equal;
- } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
- // signs are different
- if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
- return .Less;
- } else {
- return .Greater;
- }
- } else {
- const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
- const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
- if (a_fraction < b_fraction) {
- return .Less;
- } else {
- return .Greater;
- }
- }
-}
-
-pub fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp_f80(LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp_f80(GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
- return __lexf2(a, b);
-}
-
-pub fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
- return __lexf2(a, b);
-}
-
-pub fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
- return __lexf2(a, b);
-}
-
-pub fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
- return __gexf2(a, b);
-}
-
-// Comparison between f128
-
-pub fn __letf2(a: f128, b: f128) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f128, LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __getf2(a: f128, b: f128) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f128, GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
- return __letf2(a, b);
-}
-
-pub fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
- return __letf2(a, b);
-}
-
-pub fn __netf2(a: f128, b: f128) callconv(.C) i32 {
- return __letf2(a, b);
-}
-
-pub fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
- return __getf2(a, b);
-}
-
-// Unordered comparison between f32/f64/f128
-
-pub fn __unordsf2(a: f32, b: f32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- return unordcmp(f32, a, b);
-}
-
-pub fn __unorddf2(a: f64, b: f64) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- return unordcmp(f64, a, b);
-}
-
-pub fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- return unordcmp(f128, a, b);
-}
-
-// ARM EABI intrinsics
-
-pub fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __eqsf2, .{ a, b }) == 0);
-}
-
-pub fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __ltsf2, .{ a, b }) < 0);
-}
-
-pub fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __lesf2, .{ a, b }) <= 0);
-}
-
-pub fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gesf2, .{ a, b }) >= 0);
-}
-
-pub fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gtsf2, .{ a, b }) > 0);
-}
-
-pub fn __aeabi_fcmpun(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __unordsf2, .{ a, b });
-}
-
-pub fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __eqdf2, .{ a, b }) == 0);
-}
-
-pub fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __ltdf2, .{ a, b }) < 0);
-}
-
-pub fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __ledf2, .{ a, b }) <= 0);
-}
-
-pub fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gedf2, .{ a, b }) >= 0);
-}
-
-pub fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gtdf2, .{ a, b }) > 0);
-}
-
-pub fn __aeabi_dcmpun(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __unorddf2, .{ a, b });
-}
-
-test "comparesf2" {
- _ = @import("comparesf2_test.zig");
-}
-test "comparedf2" {
- _ = @import("comparedf2_test.zig");
-}
diff --git a/lib/compiler_rt/comparedf2_test.zig b/lib/compiler_rt/comparedf2_test.zig
index a80297ffbf..a77718e57c 100644
--- a/lib/compiler_rt/comparedf2_test.zig
+++ b/lib/compiler_rt/comparedf2_test.zig
@@ -6,7 +6,15 @@ const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
-const comparedf2 = @import("compareXf2.zig");
+const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;
+const __ledf2 = @import("./cmpdf2.zig").__ledf2;
+const __ltdf2 = @import("./cmpdf2.zig").__ltdf2;
+const __nedf2 = @import("./cmpdf2.zig").__nedf2;
+
+const __gedf2 = @import("./gedf2.zig").__gedf2;
+const __gtdf2 = @import("./gedf2.zig").__gtdf2;
+
+const __unorddf2 = @import("./unorddf2.zig").__unorddf2;
const TestVector = struct {
a: f64,
@@ -21,25 +29,25 @@ const TestVector = struct {
};
fn test__cmpdf2(vector: TestVector) bool {
- if (comparedf2.__eqdf2(vector.a, vector.b) != vector.eqReference) {
+ if (__eqdf2(vector.a, vector.b) != vector.eqReference) {
return false;
}
- if (comparedf2.__gedf2(vector.a, vector.b) != vector.geReference) {
+ if (__gedf2(vector.a, vector.b) != vector.geReference) {
return false;
}
- if (comparedf2.__gtdf2(vector.a, vector.b) != vector.gtReference) {
+ if (__gtdf2(vector.a, vector.b) != vector.gtReference) {
return false;
}
- if (comparedf2.__ledf2(vector.a, vector.b) != vector.leReference) {
+ if (__ledf2(vector.a, vector.b) != vector.leReference) {
return false;
}
- if (comparedf2.__ltdf2(vector.a, vector.b) != vector.ltReference) {
+ if (__ltdf2(vector.a, vector.b) != vector.ltReference) {
return false;
}
- if (comparedf2.__nedf2(vector.a, vector.b) != vector.neReference) {
+ if (__nedf2(vector.a, vector.b) != vector.neReference) {
return false;
}
- if (comparedf2.__unorddf2(vector.a, vector.b) != vector.unReference) {
+ if (__unorddf2(vector.a, vector.b) != vector.unReference) {
return false;
}
return true;
diff --git a/lib/compiler_rt/comparef.zig b/lib/compiler_rt/comparef.zig
new file mode 100644
index 0000000000..1fb6d2dfa0
--- /dev/null
+++ b/lib/compiler_rt/comparef.zig
@@ -0,0 +1,118 @@
+const std = @import("std");
+
+pub const LE = enum(i32) {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+
+ const Unordered: LE = .Greater;
+};
+
+pub const GE = enum(i32) {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+
+ const Unordered: GE = .Less;
+};
+
+pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT {
+ const bits = @typeInfo(T).Float.bits;
+ const srep_t = std.meta.Int(.signed, bits);
+ const rep_t = std.meta.Int(.unsigned, bits);
+
+ const significandBits = std.math.floatMantissaBits(T);
+ const exponentBits = std.math.floatExponentBits(T);
+ const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
+ const absMask = signBit - 1;
+ const infT = comptime std.math.inf(T);
+ const infRep = @bitCast(rep_t, infT);
+
+ const aInt = @bitCast(srep_t, a);
+ const bInt = @bitCast(srep_t, b);
+ const aAbs = @bitCast(rep_t, aInt) & absMask;
+ const bAbs = @bitCast(rep_t, bInt) & absMask;
+
+ // If either a or b is NaN, they are unordered.
+ if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
+
+ // If a and b are both zeros, they are equal.
+ if ((aAbs | bAbs) == 0) return .Equal;
+
+ // If at least one of a and b is positive, we get the same result comparing
+ // a and b as signed integers as we would with a floating-point compare.
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt) {
+ return .Less;
+ } else if (aInt == bInt) {
+ return .Equal;
+ } else return .Greater;
+ } else {
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ if (aInt > bInt) {
+ return .Less;
+ } else if (aInt == bInt) {
+ return .Equal;
+ } else return .Greater;
+ }
+}
+
+pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
+ const a_rep = std.math.break_f80(a);
+ const b_rep = std.math.break_f80(b);
+ const sig_bits = std.math.floatMantissaBits(f80);
+ const int_bit = 0x8000000000000000;
+ const sign_bit = 0x8000;
+ const special_exp = 0x7FFF;
+
+ // If either a or b is NaN, they are unordered.
+ if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
+ (b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
+ return RT.Unordered;
+
+ // If a and b are both zeros, they are equal.
+ if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
+ return .Equal;
+
+ if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
+ return .Equal;
+ } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
+ // signs are different
+ if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ } else {
+ const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
+ const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
+ if (a_fraction < b_fraction) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ }
+}
+
+pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
+ const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+
+ const significandBits = std.math.floatMantissaBits(T);
+ const exponentBits = std.math.floatExponentBits(T);
+ const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
+ const absMask = signBit - 1;
+ const infRep = @bitCast(rep_t, std.math.inf(T));
+
+ const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
+ const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
+
+ return @boolToInt(aAbs > infRep or bAbs > infRep);
+}
+
+test {
+ _ = @import("comparesf2_test.zig");
+ _ = @import("comparedf2_test.zig");
+}
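
The LE and GE enums above encode the libgcc soft-float comparison convention: the caller receives an i32 in which NaN inputs collapse onto .Greater for the __le*/__lt*/__eq*/__ne* family and onto .Less for __ge*/__gt*. A hedged sketch of how a caller interprets the value (the wrapper name is illustrative, not part of this change):

const comparef = @import("comparef.zig");

// Strict "less than" in the __lt*2 sense: a negative result. NaN can never
// qualify because unordered inputs map to LE.Unordered == .Greater.
fn isStrictlyLess(a: f64, b: f64) bool {
    return @enumToInt(comparef.cmpf2(f64, comparef.LE, a, b)) < 0;
}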
diff --git a/lib/compiler_rt/comparesf2_test.zig b/lib/compiler_rt/comparesf2_test.zig
index 8bc2c67956..b2fafd38dd 100644
--- a/lib/compiler_rt/comparesf2_test.zig
+++ b/lib/compiler_rt/comparesf2_test.zig
@@ -6,7 +6,15 @@ const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
-const comparesf2 = @import("compareXf2.zig");
+const __eqsf2 = @import("./cmpsf2.zig").__eqsf2;
+const __lesf2 = @import("./cmpsf2.zig").__lesf2;
+const __ltsf2 = @import("./cmpsf2.zig").__ltsf2;
+const __nesf2 = @import("./cmpsf2.zig").__nesf2;
+
+const __gesf2 = @import("./gesf2.zig").__gesf2;
+const __gtsf2 = @import("./gesf2.zig").__gtsf2;
+
+const __unordsf2 = @import("./unordsf2.zig").__unordsf2;
const TestVector = struct {
a: f32,
@@ -21,25 +29,25 @@ const TestVector = struct {
};
fn test__cmpsf2(vector: TestVector) bool {
- if (comparesf2.__eqsf2(vector.a, vector.b) != vector.eqReference) {
+ if (__eqsf2(vector.a, vector.b) != vector.eqReference) {
return false;
}
- if (comparesf2.__gesf2(vector.a, vector.b) != vector.geReference) {
+ if (__gesf2(vector.a, vector.b) != vector.geReference) {
return false;
}
- if (comparesf2.__gtsf2(vector.a, vector.b) != vector.gtReference) {
+ if (__gtsf2(vector.a, vector.b) != vector.gtReference) {
return false;
}
- if (comparesf2.__lesf2(vector.a, vector.b) != vector.leReference) {
+ if (__lesf2(vector.a, vector.b) != vector.leReference) {
return false;
}
- if (comparesf2.__ltsf2(vector.a, vector.b) != vector.ltReference) {
+ if (__ltsf2(vector.a, vector.b) != vector.ltReference) {
return false;
}
- if (comparesf2.__nesf2(vector.a, vector.b) != vector.neReference) {
+ if (__nesf2(vector.a, vector.b) != vector.neReference) {
return false;
}
- if (comparesf2.__unordsf2(vector.a, vector.b) != vector.unReference) {
+ if (__unordsf2(vector.a, vector.b) != vector.unReference) {
return false;
}
return true;
diff --git a/lib/compiler_rt/cos.zig b/lib/compiler_rt/cos.zig
index e01f458243..311d927168 100644
--- a/lib/compiler_rt/cos.zig
+++ b/lib/compiler_rt/cos.zig
@@ -1,11 +1,26 @@
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
const trig = @import("trig.zig");
const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
+comptime {
+ @export(__cosh, .{ .name = "__cosh", .linkage = common.linkage });
+ @export(cosf, .{ .name = "cosf", .linkage = common.linkage });
+ @export(cos, .{ .name = "cos", .linkage = common.linkage });
+ @export(__cosx, .{ .name = "__cosx", .linkage = common.linkage });
+ const cosq_sym_name = if (common.want_ppc_abi) "cosf128" else "cosq";
+ @export(cosq, .{ .name = cosq_sym_name, .linkage = common.linkage });
+ @export(cosl, .{ .name = "cosl", .linkage = common.linkage });
+}
+
pub fn __cosh(a: f16) callconv(.C) f16 {
// TODO: more efficient implementation
return @floatCast(f16, cosf(a));
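
The comptime block above publishes one C symbol per precision, and on PowerPC the f128 entry point is exported as cosf128 rather than cosq. A sketch of what that surface looks like to calling code; the extern declarations mirror the exported names and assume the compiler-rt object is linked in:

const std = @import("std");

extern fn cosf(x: f32) f32;
extern fn cos(x: f64) f64;

test "exported cos symbols behave like the libm functions they replace" {
    try std.testing.expectApproxEqAbs(@as(f32, 1.0), cosf(0.0), 1e-6);
    try std.testing.expectApproxEqAbs(@as(f64, 1.0), cos(0.0), 1e-12);
}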
diff --git a/lib/compiler_rt/count0bits.zig b/lib/compiler_rt/count0bits.zig
index 1f6d28ae0b..d763e5c8a3 100644
--- a/lib/compiler_rt/count0bits.zig
+++ b/lib/compiler_rt/count0bits.zig
@@ -1,5 +1,21 @@
const std = @import("std");
const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__clzsi2, .{ .name = "__clzsi2", .linkage = common.linkage });
+ @export(__clzdi2, .{ .name = "__clzdi2", .linkage = common.linkage });
+ @export(__clzti2, .{ .name = "__clzti2", .linkage = common.linkage });
+ @export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = common.linkage });
+ @export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = common.linkage });
+ @export(__ctzti2, .{ .name = "__ctzti2", .linkage = common.linkage });
+ @export(__ffssi2, .{ .name = "__ffssi2", .linkage = common.linkage });
+ @export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = common.linkage });
+ @export(__ffsti2, .{ .name = "__ffsti2", .linkage = common.linkage });
+}
// clz - count leading zeroes
// - clzXi2 for unoptimized little and big endian
@@ -15,8 +31,6 @@ const builtin = @import("builtin");
// - ffsXi2 for unoptimized little and big endian
inline fn clzXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@@ -154,8 +168,6 @@ pub fn __clzti2(a: i128) callconv(.C) i32 {
}
inline fn ctzXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@@ -191,8 +203,6 @@ pub fn __ctzti2(a: i128) callconv(.C) i32 {
}
inline fn ffsXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
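
The exported symbols follow the libgcc contracts: __clz*2 counts leading zero bits, __ctz*2 counts trailing zero bits, and __ffs*2 returns the one-based index of the least significant set bit, or zero when the argument is zero. A quick illustrative check of the 32-bit variants (a sketch; the import assumes a sibling test file):

const std = @import("std");
const bits = @import("count0bits.zig");

test "count0bits contracts" {
    try std.testing.expectEqual(@as(i32, 31), bits.__clzsi2(1));
    try std.testing.expectEqual(@as(i32, 0), bits.__ctzsi2(1));
    try std.testing.expectEqual(@as(i32, 1), bits.__ffssi2(1));
    try std.testing.expectEqual(@as(i32, 0), bits.__ffssi2(0));
}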
diff --git a/lib/compiler_rt/divdf3.zig b/lib/compiler_rt/divdf3.zig
index 137f5c02f9..dd22f4836c 100644
--- a/lib/compiler_rt/divdf3.zig
+++ b/lib/compiler_rt/divdf3.zig
@@ -1,12 +1,35 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divdf3.c
+//! Ported from:
+//!
+//! https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divdf3.c
const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const is_test = builtin.is_test;
+const common = @import("common.zig");
+
+const normalize = common.normalize;
+const wideMultiply = common.wideMultiply;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = common.linkage });
+ } else {
+ @export(__divdf3, .{ .name = "__divdf3", .linkage = common.linkage });
+ }
+}
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
- @setRuntimeSafety(builtin.is_test);
+ return div(a, b);
+}
+
+fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return div(a, b);
+}
+
+inline fn div(a: f64, b: f64) f64 {
const Z = std.meta.Int(.unsigned, 64);
const SignedZ = std.meta.Int(.signed, 64);
@@ -202,130 +225,6 @@ pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
}
}
-pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
- @setRuntimeSafety(builtin.is_test);
- switch (Z) {
- u32 => {
- // 32x32 --> 64 bit multiply
- const product = @as(u64, a) * @as(u64, b);
- hi.* = @truncate(u32, product >> 32);
- lo.* = @truncate(u32, product);
- },
- u64 => {
- const S = struct {
- fn loWord(x: u64) u64 {
- return @truncate(u32, x);
- }
- fn hiWord(x: u64) u64 {
- return @truncate(u32, x >> 32);
- }
- };
- // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
- // Each of the component 32x32 -> 64 products
- const plolo: u64 = S.loWord(a) * S.loWord(b);
- const plohi: u64 = S.loWord(a) * S.hiWord(b);
- const philo: u64 = S.hiWord(a) * S.loWord(b);
- const phihi: u64 = S.hiWord(a) * S.hiWord(b);
- // Sum terms that contribute to lo in a way that allows us to get the carry
- const r0: u64 = S.loWord(plolo);
- const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
- lo.* = r0 +% (r1 << 32);
- // Sum terms contributing to hi with the carry from lo
- hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
- },
- u128 => {
- const Word_LoMask = @as(u64, 0x00000000ffffffff);
- const Word_HiMask = @as(u64, 0xffffffff00000000);
- const Word_FullMask = @as(u64, 0xffffffffffffffff);
- const S = struct {
- fn Word_1(x: u128) u64 {
- return @truncate(u32, x >> 96);
- }
- fn Word_2(x: u128) u64 {
- return @truncate(u32, x >> 64);
- }
- fn Word_3(x: u128) u64 {
- return @truncate(u32, x >> 32);
- }
- fn Word_4(x: u128) u64 {
- return @truncate(u32, x);
- }
- };
- // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
-
- const product11: u64 = S.Word_1(a) * S.Word_1(b);
- const product12: u64 = S.Word_1(a) * S.Word_2(b);
- const product13: u64 = S.Word_1(a) * S.Word_3(b);
- const product14: u64 = S.Word_1(a) * S.Word_4(b);
- const product21: u64 = S.Word_2(a) * S.Word_1(b);
- const product22: u64 = S.Word_2(a) * S.Word_2(b);
- const product23: u64 = S.Word_2(a) * S.Word_3(b);
- const product24: u64 = S.Word_2(a) * S.Word_4(b);
- const product31: u64 = S.Word_3(a) * S.Word_1(b);
- const product32: u64 = S.Word_3(a) * S.Word_2(b);
- const product33: u64 = S.Word_3(a) * S.Word_3(b);
- const product34: u64 = S.Word_3(a) * S.Word_4(b);
- const product41: u64 = S.Word_4(a) * S.Word_1(b);
- const product42: u64 = S.Word_4(a) * S.Word_2(b);
- const product43: u64 = S.Word_4(a) * S.Word_3(b);
- const product44: u64 = S.Word_4(a) * S.Word_4(b);
-
- const sum0: u128 = @as(u128, product44);
- const sum1: u128 = @as(u128, product34) +%
- @as(u128, product43);
- const sum2: u128 = @as(u128, product24) +%
- @as(u128, product33) +%
- @as(u128, product42);
- const sum3: u128 = @as(u128, product14) +%
- @as(u128, product23) +%
- @as(u128, product32) +%
- @as(u128, product41);
- const sum4: u128 = @as(u128, product13) +%
- @as(u128, product22) +%
- @as(u128, product31);
- const sum5: u128 = @as(u128, product12) +%
- @as(u128, product21);
- const sum6: u128 = @as(u128, product11);
-
- const r0: u128 = (sum0 & Word_FullMask) +%
- ((sum1 & Word_LoMask) << 32);
- const r1: u128 = (sum0 >> 64) +%
- ((sum1 >> 32) & Word_FullMask) +%
- (sum2 & Word_FullMask) +%
- ((sum3 << 32) & Word_HiMask);
-
- lo.* = r0 +% (r1 << 64);
- hi.* = (r1 >> 64) +%
- (sum1 >> 96) +%
- (sum2 >> 64) +%
- (sum3 >> 32) +%
- sum4 +%
- (sum5 << 32) +%
- (sum6 << 64);
- },
- else => @compileError("unsupported"),
- }
-}
-
-pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
- @setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
-
- const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
- significand.* <<= @intCast(std.math.Log2Int(Z), shift);
- return @as(i32, 1) - shift;
-}
-
-pub fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __divdf3, .{ a, b });
-}
-
test {
_ = @import("divdf3_test.zig");
}
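
Both exported entry points above forward to a single inline div helper, so the AEABI and standard C symbols share one implementation and the comptime block emits exactly one of them per target. A hedged smoke check of the C entry point (sketch only):

const std = @import("std");
const __divdf3 = @import("divdf3.zig").__divdf3;

test "__divdf3 smoke test" {
    try std.testing.expectEqual(@as(f64, 2.0), __divdf3(6.0, 3.0));
    try std.testing.expectEqual(@as(f64, 0.5), __divdf3(1.0, 2.0));
}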
diff --git a/lib/compiler_rt/divsf3.zig b/lib/compiler_rt/divsf3.zig
index 5e7dc7bb44..13565f9b64 100644
--- a/lib/compiler_rt/divsf3.zig
+++ b/lib/compiler_rt/divsf3.zig
@@ -1,12 +1,33 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divsf3.c
+//! Ported from:
+//!
+//! https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divsf3.c
const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+
+const common = @import("common.zig");
+const normalize = common.normalize;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = common.linkage });
+ } else {
+ @export(__divsf3, .{ .name = "__divsf3", .linkage = common.linkage });
+ }
+}
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
- @setRuntimeSafety(builtin.is_test);
+ return div(a, b);
+}
+
+fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
+ return div(a, b);
+}
+
+inline fn div(a: f32, b: f32) f32 {
const Z = std.meta.Int(.unsigned, 32);
const significandBits = std.math.floatMantissaBits(f32);
@@ -184,22 +205,6 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}
-fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
- @setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- const significandBits = std.math.floatMantissaBits(T);
- const implicitBit = @as(Z, 1) << significandBits;
-
- const shift = @clz(Z, significand.*) - @clz(Z, implicitBit);
- significand.* <<= @intCast(std.math.Log2Int(Z), shift);
- return 1 - shift;
-}
-
-pub fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __divsf3, .{ a, b });
-}
-
test {
_ = @import("divsf3_test.zig");
}
diff --git a/lib/compiler_rt/divtf3.zig b/lib/compiler_rt/divtf3.zig
index fc26c60266..b6cabeab91 100644
--- a/lib/compiler_rt/divtf3.zig
+++ b/lib/compiler_rt/divtf3.zig
@@ -1,11 +1,35 @@
const std = @import("std");
const builtin = @import("builtin");
-const normalize = @import("divdf3.zig").normalize;
-const wideMultiply = @import("divdf3.zig").wideMultiply;
+const common = @import("common.zig");
+const normalize = common.normalize;
+const wideMultiply = common.wideMultiply;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__divkf3, .{ .name = "__divkf3", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_div, .{ .name = "_Qp_div", .linkage = common.linkage });
+ } else {
+ @export(__divtf3, .{ .name = "__divtf3", .linkage = common.linkage });
+ }
+}
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
- @setRuntimeSafety(builtin.is_test);
+ return div(a, b);
+}
+
+fn __divkf3(a: f128, b: f128) callconv(.C) f128 {
+ return div(a, b);
+}
+
+fn _Qp_div(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+ c.* = div(a.*, b.*);
+}
+
+inline fn div(a: f128, b: f128) f128 {
const Z = std.meta.Int(.unsigned, 128);
const significandBits = std.math.floatMantissaBits(f128);
diff --git a/lib/compiler_rt/divti3.zig b/lib/compiler_rt/divti3.zig
index 41286c3414..b99a9081a4 100644
--- a/lib/compiler_rt/divti3.zig
+++ b/lib/compiler_rt/divti3.zig
@@ -1,9 +1,43 @@
-const udivmod = @import("udivmod.zig").udivmod;
+const std = @import("std");
const builtin = @import("builtin");
+const udivmod = @import("udivmod.zig").udivmod;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
+ }
+ } else {
+ @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
+ }
+}
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
- @setRuntimeSafety(builtin.is_test);
+ return div(a, b);
+}
+const v128 = @import("std").meta.Vector(2, u64);
+
+fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b)));
+}
+
+inline fn div(a: i128, b: i128) i128 {
const s_a = a >> (128 - 1);
const s_b = b >> (128 - 1);
@@ -15,14 +49,6 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
return (@bitCast(i128, r) ^ s) -% s;
}
-const v128 = @import("std").meta.Vector(2, u64);
-pub fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
- return @bitCast(v128, @call(.{ .modifier = .always_inline }, __divti3, .{
- @bitCast(i128, a),
- @bitCast(i128, b),
- }));
-}
-
test {
_ = @import("divti3_test.zig");
}
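
The Windows x86_64 shim exists because LLVM expects the 128-bit "ti" builtins to take their arguments as two-element u64 vectors on that ABI; the shim simply bitcasts in and out. A small, target-independent sketch showing that the round trip through Vector(2, u64) is lossless:

const std = @import("std");
const v128 = std.meta.Vector(2, u64);

test "i128 survives the v128 bitcast round trip" {
    const x: i128 = -0x0123456789ABCDEF0123456789ABCDEF;
    const as_vec = @bitCast(v128, x);
    try std.testing.expectEqual(x, @bitCast(i128, as_vec));
}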
diff --git a/lib/compiler_rt/divxf3.zig b/lib/compiler_rt/divxf3.zig
index 5f5ed667ec..b8d27a6da0 100644
--- a/lib/compiler_rt/divxf3.zig
+++ b/lib/compiler_rt/divxf3.zig
@@ -1,10 +1,18 @@
const std = @import("std");
const builtin = @import("builtin");
-const normalize = @import("divdf3.zig").normalize;
-const wideMultiply = @import("divdf3.zig").wideMultiply;
+const arch = builtin.cpu.arch;
+
+const common = @import("common.zig");
+const normalize = common.normalize;
+const wideMultiply = common.wideMultiply;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__divxf3, .{ .name = "__divxf3", .linkage = common.linkage });
+}
pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
- @setRuntimeSafety(builtin.is_test);
const T = f80;
const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig
index e6aa8930e9..723eac4af2 100644
--- a/lib/compiler_rt/emutls.zig
+++ b/lib/compiler_rt/emutls.zig
@@ -1,22 +1,26 @@
-// __emutls_get_address specific builtin
-//
-// derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
-// https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
-//
+//! __emutls_get_address specific builtin
+//!
+//! derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
+//! https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
const abort = std.os.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;
-// defined in C as:
-// typedef unsigned int gcc_word __attribute__((mode(word)));
+/// defined in C as:
+/// typedef unsigned int gcc_word __attribute__((mode(word)));
const gcc_word = usize;
+pub const panic = common.panic;
+
comptime {
- assert(builtin.link_libc);
+ if (builtin.link_libc and builtin.os.tag == .openbsd) {
+ @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage });
+ }
}
/// public entrypoint for generated code using EmulatedTLS
@@ -319,6 +323,8 @@ const emutls_control = extern struct {
};
test "simple_allocator" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
var data1: *[64]u8 = simple_allocator.alloc([64]u8);
defer simple_allocator.free(data1);
for (data1) |*c| {
@@ -333,6 +339,8 @@ test "simple_allocator" {
}
test "__emutls_get_address zeroed" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
var ctl = emutls_control.init(usize, null);
try expect(ctl.object.index == 0);
@@ -352,6 +360,8 @@ test "__emutls_get_address zeroed" {
}
test "__emutls_get_address with default_value" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
var value: usize = 5678; // default value
var ctl = emutls_control.init(usize, &value);
try expect(ctl.object.index == 0);
@@ -370,6 +380,8 @@ test "__emutls_get_address with default_value" {
}
test "test default_value with differents sizes" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
const testType = struct {
fn _testType(comptime T: type, value: T) !void {
var def: T = value;
diff --git a/lib/compiler_rt/exp.zig b/lib/compiler_rt/exp.zig
index a2c5d0e550..f34f226be4 100644
--- a/lib/compiler_rt/exp.zig
+++ b/lib/compiler_rt/exp.zig
@@ -5,8 +5,23 @@
// https://git.musl-libc.org/cgit/musl/tree/src/math/exp.c
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__exph, .{ .name = "__exph", .linkage = common.linkage });
+ @export(expf, .{ .name = "expf", .linkage = common.linkage });
+ @export(exp, .{ .name = "exp", .linkage = common.linkage });
+ @export(__expx, .{ .name = "__expx", .linkage = common.linkage });
+ const expq_sym_name = if (common.want_ppc_abi) "expf128" else "expq";
+ @export(expq, .{ .name = expq_sym_name, .linkage = common.linkage });
+ @export(expl, .{ .name = "expl", .linkage = common.linkage });
+}
pub fn __exph(a: f16) callconv(.C) f16 {
// TODO: more efficient implementation
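
As with cos.zig, the f16/f32/f64/f80/f128 entry points are exported individually, and the quad-precision symbol is named expf128 on PowerPC and expq elsewhere. A hedged smoke check of the double-precision entry point, assuming exp is declared pub here like the entry points in the other math files:

const std = @import("std");
const exp = @import("exp.zig");

test "exp(1.0) is approximately e" {
    try std.testing.expectApproxEqAbs(@as(f64, std.math.e), exp.exp(1.0), 1e-12);
}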
diff --git a/lib/compiler_rt/exp2.zig b/lib/compiler_rt/exp2.zig
index cbcb53c99f..e89a918501 100644
--- a/lib/compiler_rt/exp2.zig
+++ b/lib/compiler_rt/exp2.zig
@@ -5,8 +5,23 @@
// https://git.musl-libc.org/cgit/musl/tree/src/math/exp2.c
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__exp2h, .{ .name = "__exp2h", .linkage = common.linkage });
+ @export(exp2f, .{ .name = "exp2f", .linkage = common.linkage });
+ @export(exp2, .{ .name = "exp2", .linkage = common.linkage });
+ @export(__exp2x, .{ .name = "__exp2x", .linkage = common.linkage });
+ const exp2q_sym_name = if (common.want_ppc_abi) "exp2f128" else "exp2q";
+ @export(exp2q, .{ .name = exp2q_sym_name, .linkage = common.linkage });
+ @export(exp2l, .{ .name = "exp2l", .linkage = common.linkage });
+}
pub fn __exp2h(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/extend_f80.zig b/lib/compiler_rt/extend_f80.zig
deleted file mode 100644
index e68fb5fcf8..0000000000
--- a/lib/compiler_rt/extend_f80.zig
+++ /dev/null
@@ -1,131 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const native_arch = builtin.cpu.arch;
-
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
-
-pub fn __extendhfxf2(a: F16T) callconv(.C) f80 {
- return extendF80(f16, @bitCast(u16, a));
-}
-
-pub fn __extendsfxf2(a: f32) callconv(.C) f80 {
- return extendF80(f32, @bitCast(u32, a));
-}
-
-pub fn __extenddfxf2(a: f64) callconv(.C) f80 {
- return extendF80(f64, @bitCast(u64, a));
-}
-
-inline fn extendF80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
- @setRuntimeSafety(builtin.is_test);
-
- const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
- const src_sig_bits = std.math.floatMantissaBits(src_t);
- const dst_int_bit = 0x8000000000000000;
- const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
-
- const dst_exp_bias = 16383;
-
- const src_bits = @bitSizeOf(src_t);
- const src_exp_bits = src_bits - src_sig_bits - 1;
- const src_inf_exp = (1 << src_exp_bits) - 1;
- const src_exp_bias = src_inf_exp >> 1;
-
- const src_min_normal = 1 << src_sig_bits;
- const src_inf = src_inf_exp << src_sig_bits;
- const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
- const src_abs_mask = src_sign_mask - 1;
- const src_qnan = 1 << (src_sig_bits - 1);
- const src_nan_code = src_qnan - 1;
-
- var dst: std.math.F80 = undefined;
-
- // Break a into a sign and representation of the absolute value
- const a_abs = a & src_abs_mask;
- const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
-
- if (a_abs -% src_min_normal < src_inf - src_min_normal) {
- // a is a normal number.
- // Extend to the destination type by shifting the significand and
- // exponent into the proper position and rebiasing the exponent.
- dst.exp = @intCast(u16, a_abs >> src_sig_bits);
- dst.exp += dst_exp_bias - src_exp_bias;
- dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
- dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
- } else if (a_abs >= src_inf) {
- // a is NaN or infinity.
- // Conjure the result by beginning with infinity, then setting the qNaN
- // bit (if needed) and right-aligning the rest of the trailing NaN
- // payload field.
- dst.exp = 0x7fff;
- dst.fraction = dst_int_bit;
- dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
- dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
- } else if (a_abs != 0) {
- // a is denormal.
- // renormalize the significand and clear the leading bit, then insert
- // the correct adjusted exponent in the destination type.
- const scale: u16 = @clz(src_rep_t, a_abs) -
- @clz(src_rep_t, @as(src_rep_t, src_min_normal));
-
- dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
- dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
- dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
- dst.exp ^= 1;
- dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
- } else {
- // a is zero.
- dst.exp = 0;
- dst.fraction = 0;
- }
-
- dst.exp |= sign;
- return std.math.make_f80(dst);
-}
-
-pub fn __extendxftf2(a: f80) callconv(.C) f128 {
- @setRuntimeSafety(builtin.is_test);
-
- const src_int_bit: u64 = 0x8000000000000000;
- const src_sig_mask = ~src_int_bit;
- const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
- const dst_sig_bits = std.math.floatMantissaBits(f128);
-
- const dst_bits = @bitSizeOf(f128);
-
- const dst_min_normal = @as(u128, 1) << dst_sig_bits;
-
- // Break a into a sign and representation of the absolute value
- var a_rep = std.math.break_f80(a);
- const sign = a_rep.exp & 0x8000;
- a_rep.exp &= 0x7FFF;
- var abs_result: u128 = undefined;
-
- if (a_rep.exp == 0 and a_rep.fraction == 0) {
- // zero
- abs_result = 0;
- } else if (a_rep.exp == 0x7FFF) {
- // a is nan or infinite
- abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
- abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
- } else if (a_rep.fraction & src_int_bit != 0) {
- // a is a normal value
- abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
- abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
- } else {
- // a is denormal
- // renormalize the significand and clear the leading bit and integer part,
- // then insert the correct adjusted exponent in the destination type.
- const scale: u32 = @clz(u64, a_rep.fraction);
- abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
- abs_result ^= dst_min_normal;
- abs_result |= @as(u128, scale + 1) << dst_sig_bits;
- }
-
- // Apply the signbit to (dst_t)abs(a).
- const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
- return @bitCast(f128, result);
-}
diff --git a/lib/compiler_rt/extenddftf2.zig b/lib/compiler_rt/extenddftf2.zig
new file mode 100644
index 0000000000..21e497b3a4
--- /dev/null
+++ b/lib/compiler_rt/extenddftf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__extenddfkf2, .{ .name = "__extenddfkf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = common.linkage });
+ } else {
+ @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __extenddftf2(a: f64) callconv(.C) f128 {
+ return extendf(f128, f64, @bitCast(u64, a));
+}
+
+fn __extenddfkf2(a: f64) callconv(.C) f128 {
+ return extendf(f128, f64, @bitCast(u64, a));
+}
+
+fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
+ c.* = extendf(f128, f64, @bitCast(u64, a));
+}
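
Note the SPARC flavor above: _Qp_dtoq follows the SPARC V9 quad ABI and writes its f128 result through an out pointer instead of returning it by value. A hedged sketch of how a caller on that ABI sees the conversion; the extern declaration mirrors the signature above and is illustrative only:

extern fn _Qp_dtoq(c: *f128, a: f64) void;

fn doubleToQuad(a: f64) f128 {
    var q: f128 = undefined;
    _Qp_dtoq(&q, a);
    return q;
}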
diff --git a/lib/compiler_rt/extenddfxf2.zig b/lib/compiler_rt/extenddfxf2.zig
new file mode 100644
index 0000000000..e76b2fc038
--- /dev/null
+++ b/lib/compiler_rt/extenddfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage });
+}
+
+fn __extenddfxf2(a: f64) callconv(.C) f80 {
+ return extend_f80(f64, @bitCast(u64, a));
+}
diff --git a/lib/compiler_rt/extendXfYf2.zig b/lib/compiler_rt/extendf.zig
index 8622fe1513..8eb23c1d82 100644
--- a/lib/compiler_rt/extendXfYf2.zig
+++ b/lib/compiler_rt/extendf.zig
@@ -1,45 +1,10 @@
const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const native_arch = builtin.cpu.arch;
-
-pub fn __extendsfdf2(a: f32) callconv(.C) f64 {
- return extendXfYf2(f64, f32, @bitCast(u32, a));
-}
-
-pub fn __extenddftf2(a: f64) callconv(.C) f128 {
- return extendXfYf2(f128, f64, @bitCast(u64, a));
-}
-
-pub fn __extendsftf2(a: f32) callconv(.C) f128 {
- return extendXfYf2(f128, f32, @bitCast(u32, a));
-}
-
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
-
-pub fn __extendhfsf2(a: F16T) callconv(.C) f32 {
- return extendXfYf2(f32, f16, @bitCast(u16, a));
-}
-
-pub fn __extendhftf2(a: F16T) callconv(.C) f128 {
- return extendXfYf2(f128, f16, @bitCast(u16, a));
-}
-
-pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, arg });
-}
-
-pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, arg) });
-}
-
-inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
- @setRuntimeSafety(builtin.is_test);
+pub inline fn extendf(
+ comptime dst_t: type,
+ comptime src_t: type,
+ a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits),
+) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
@@ -107,6 +72,71 @@ inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.In
return @bitCast(dst_t, result);
}
+pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
+ const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
+ const src_sig_bits = std.math.floatMantissaBits(src_t);
+ const dst_int_bit = 0x8000000000000000;
+ const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+
+ const dst_exp_bias = 16383;
+
+ const src_bits = @bitSizeOf(src_t);
+ const src_exp_bits = src_bits - src_sig_bits - 1;
+ const src_inf_exp = (1 << src_exp_bits) - 1;
+ const src_exp_bias = src_inf_exp >> 1;
+
+ const src_min_normal = 1 << src_sig_bits;
+ const src_inf = src_inf_exp << src_sig_bits;
+ const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
+ const src_abs_mask = src_sign_mask - 1;
+ const src_qnan = 1 << (src_sig_bits - 1);
+ const src_nan_code = src_qnan - 1;
+
+ var dst: std.math.F80 = undefined;
+
+ // Break a into a sign and representation of the absolute value
+ const a_abs = a & src_abs_mask;
+ const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
+
+ if (a_abs -% src_min_normal < src_inf - src_min_normal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ dst.exp = @intCast(u16, a_abs >> src_sig_bits);
+ dst.exp += dst_exp_bias - src_exp_bias;
+ dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ } else if (a_abs >= src_inf) {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ dst.exp = 0x7fff;
+ dst.fraction = dst_int_bit;
+ dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
+ } else if (a_abs != 0) {
+ // a is denormal.
+ // renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ const scale: u16 = @clz(src_rep_t, a_abs) -
+ @clz(src_rep_t, @as(src_rep_t, src_min_normal));
+
+ dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
+ dst.exp ^= 1;
+ dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
+ } else {
+ // a is zero.
+ dst.exp = 0;
+ dst.fraction = 0;
+ }
+
+ dst.exp |= sign;
+ return std.math.make_f80(dst);
+}
+
test {
- _ = @import("extendXfYf2_test.zig");
+ _ = @import("extendf_test.zig");
}
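
extend_f80 takes the source float as a raw bit pattern and rebuilds the f80 exponent/fraction pair, setting the explicit integer bit for normal and renormalized denormal inputs. A minimal sanity sketch, assuming a host where f80 arithmetic is usable in tests:

const std = @import("std");
const extend_f80 = @import("extendf.zig").extend_f80;

test "extend_f80 widens small exact values losslessly" {
    try std.testing.expectEqual(@as(f80, 1.0), extend_f80(f32, @bitCast(u32, @as(f32, 1.0))));
    try std.testing.expectEqual(@as(f80, -2.5), extend_f80(f64, @bitCast(u64, @as(f64, -2.5))));
}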
diff --git a/lib/compiler_rt/extendXfYf2_test.zig b/lib/compiler_rt/extendf_test.zig
index d0c4f82e97..1102092a04 100644
--- a/lib/compiler_rt/extendXfYf2_test.zig
+++ b/lib/compiler_rt/extendf_test.zig
@@ -1,22 +1,22 @@
const builtin = @import("builtin");
-const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2;
-const __extendhftf2 = @import("extendXfYf2.zig").__extendhftf2;
-const __extendsftf2 = @import("extendXfYf2.zig").__extendsftf2;
-const __extenddftf2 = @import("extendXfYf2.zig").__extenddftf2;
-const F16T = @import("extendXfYf2.zig").F16T;
+const __extendhfsf2 = @import("extendhfsf2.zig").__extendhfsf2;
+const __extendhftf2 = @import("extendhftf2.zig").__extendhftf2;
+const __extendsftf2 = @import("extendsftf2.zig").__extendsftf2;
+const __extenddftf2 = @import("extenddftf2.zig").__extenddftf2;
+const F16T = @import("./common.zig").F16T;
-fn test__extenddftf2(a: f64, expectedHi: u64, expectedLo: u64) !void {
+fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void {
const x = __extenddftf2(a);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
- if (hi == expectedHi and lo == expectedLo)
+ if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation(signal NaN)
- if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
@@ -43,18 +43,18 @@ fn test__extendhfsf2(a: u16, expected: u32) !void {
return error.TestFailure;
}
-fn test__extendsftf2(a: f32, expectedHi: u64, expectedLo: u64) !void {
+fn test__extendsftf2(a: f32, expected_hi: u64, expected_lo: u64) !void {
const x = __extendsftf2(a);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
- if (hi == expectedHi and lo == expectedLo)
+ if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation(signal NaN)
- if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
@@ -159,18 +159,18 @@ fn makeInf32() f32 {
return @bitCast(f32, @as(u32, 0x7f800000));
}
-fn test__extendhftf2(a: u16, expectedHi: u64, expectedLo: u64) !void {
+fn test__extendhftf2(a: u16, expected_hi: u64, expected_lo: u64) !void {
const x = __extendhftf2(@bitCast(F16T, a));
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
- if (hi == expectedHi and lo == expectedLo)
+ if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation(signal NaN)
- if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
diff --git a/lib/compiler_rt/extendhfsf2.zig b/lib/compiler_rt/extendhfsf2.zig
new file mode 100644
index 0000000000..a6bf5f5be5
--- /dev/null
+++ b/lib/compiler_rt/extendhfsf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.gnu_f16_abi) {
+ @export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage });
+ } else if (common.want_aeabi) {
+ @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = common.linkage });
+ } else {
+ @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __extendhfsf2(a: common.F16T) callconv(.C) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
+
+fn __gnu_h2f_ieee(a: common.F16T) callconv(.C) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
+
+fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
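
Because of the F16T workaround in common.zig, the half-precision argument arrives either as a real f16 (AArch64) or as its raw u16 bit pattern (all other targets), so callers bitcast accordingly. A sketch exercising the C entry point with the IEEE half encoding of 1.0 (0x3C00):

const std = @import("std");
const common = @import("common.zig");
const __extendhfsf2 = @import("extendhfsf2.zig").__extendhfsf2;

test "half 1.0 extends to f32 1.0" {
    const half_one: u16 = 0x3C00;
    try std.testing.expectEqual(@as(f32, 1.0), __extendhfsf2(@bitCast(common.F16T, half_one)));
}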
diff --git a/lib/compiler_rt/extendhftf2.zig b/lib/compiler_rt/extendhftf2.zig
new file mode 100644
index 0000000000..5d339fabce
--- /dev/null
+++ b/lib/compiler_rt/extendhftf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage });
+}
+
+pub fn __extendhftf2(a: common.F16T) callconv(.C) f128 {
+ return extendf(f128, f16, @bitCast(u16, a));
+}
diff --git a/lib/compiler_rt/extendhfxf2.zig b/lib/compiler_rt/extendhfxf2.zig
new file mode 100644
index 0000000000..e509f96575
--- /dev/null
+++ b/lib/compiler_rt/extendhfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage });
+}
+
+fn __extendhfxf2(a: common.F16T) callconv(.C) f80 {
+ return extend_f80(f16, @bitCast(u16, a));
+}
diff --git a/lib/compiler_rt/extendsfdf2.zig b/lib/compiler_rt/extendsfdf2.zig
new file mode 100644
index 0000000000..7fd69f6c22
--- /dev/null
+++ b/lib/compiler_rt/extendsfdf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = common.linkage });
+ } else {
+ @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = common.linkage });
+ }
+}
+
+fn __extendsfdf2(a: f32) callconv(.C) f64 {
+ return extendf(f64, f32, @bitCast(u32, a));
+}
+
+fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 {
+ return extendf(f64, f32, @bitCast(u32, a));
+}
diff --git a/lib/compiler_rt/extendsftf2.zig b/lib/compiler_rt/extendsftf2.zig
new file mode 100644
index 0000000000..acdc0d586d
--- /dev/null
+++ b/lib/compiler_rt/extendsftf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__extendsfkf2, .{ .name = "__extendsfkf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = common.linkage });
+ } else {
+ @export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __extendsftf2(a: f32) callconv(.C) f128 {
+ return extendf(f128, f32, @bitCast(u32, a));
+}
+
+fn __extendsfkf2(a: f32) callconv(.C) f128 {
+ return extendf(f128, f32, @bitCast(u32, a));
+}
+
+fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
+ c.* = extendf(f128, f32, @bitCast(u32, a));
+}
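
Note: the _Qp_* symbols follow the SPARC soft-quad ABI, where f128 operands are passed by pointer and f128 results are written through an out-pointer rather than returned by value (see _Qp_stoq above and _Qp_qtox further down). A minimal sketch of that calling shape, using a hypothetical local function rather than the upstream implementation:

    const std = @import("std");

    // Hypothetical stand-in that mimics the out-pointer result convention.
    fn qpStoqSketch(c: *f128, a: f32) void {
        c.* = a; // widening f32 -> f128
    }

    test "SPARC-style out-pointer result" {
        var r: f128 = undefined;
        qpStoqSketch(&r, 2.5);
        try std.testing.expect(r == 2.5);
    }
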
diff --git a/lib/compiler_rt/extendsfxf2.zig b/lib/compiler_rt/extendsfxf2.zig
new file mode 100644
index 0000000000..41bb5ace85
--- /dev/null
+++ b/lib/compiler_rt/extendsfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage });
+}
+
+fn __extendsfxf2(a: f32) callconv(.C) f80 {
+ return extend_f80(f32, @bitCast(u32, a));
+}
diff --git a/lib/compiler_rt/extendxftf2.zig b/lib/compiler_rt/extendxftf2.zig
new file mode 100644
index 0000000000..bb5d6a377b
--- /dev/null
+++ b/lib/compiler_rt/extendxftf2.zig
@@ -0,0 +1,50 @@
+const std = @import("std");
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage });
+}
+
+fn __extendxftf2(a: f80) callconv(.C) f128 {
+ const src_int_bit: u64 = 0x8000000000000000;
+ const src_sig_mask = ~src_int_bit;
+ const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+ const dst_sig_bits = std.math.floatMantissaBits(f128);
+
+ const dst_bits = @bitSizeOf(f128);
+
+ const dst_min_normal = @as(u128, 1) << dst_sig_bits;
+
+ // Break a into a sign and representation of the absolute value
+ var a_rep = std.math.break_f80(a);
+ const sign = a_rep.exp & 0x8000;
+ a_rep.exp &= 0x7FFF;
+ var abs_result: u128 = undefined;
+
+ if (a_rep.exp == 0 and a_rep.fraction == 0) {
+ // zero
+ abs_result = 0;
+ } else if (a_rep.exp == 0x7FFF) {
+ // a is nan or infinite
+ abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else if (a_rep.fraction & src_int_bit != 0) {
+ // a is a normal value
+ abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else {
+ // a is denormal
+ // renormalize the significand and clear the leading bit and integer part,
+ // then insert the correct adjusted exponent in the destination type.
+ const scale: u32 = @clz(u64, a_rep.fraction);
+ abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
+ abs_result ^= dst_min_normal;
+ abs_result |= @as(u128, scale + 1) << dst_sig_bits;
+ }
+
+ // Apply the signbit to (dst_t)abs(a).
+ const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
+ return @bitCast(f128, result);
+}
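
Note: f80 stores its leading integer bit explicitly, so this conversion is open-coded instead of going through the shared extendf helper. From the caller's side it is still plain widening; an illustrative test (not part of this change):

    const std = @import("std");

    test "f80 to f128 widening (lowered to __extendxftf2)" {
        var x: f80 = 1.5;
        const y: f128 = x; // implicit float widening
        try std.testing.expect(y == 1.5);
    }
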
diff --git a/lib/compiler_rt/fabs.zig b/lib/compiler_rt/fabs.zig
index 396fdd46b7..fd3a58a9b7 100644
--- a/lib/compiler_rt/fabs.zig
+++ b/lib/compiler_rt/fabs.zig
@@ -1,4 +1,19 @@
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fabsh, .{ .name = "__fabsh", .linkage = common.linkage });
+ @export(fabsf, .{ .name = "fabsf", .linkage = common.linkage });
+ @export(fabs, .{ .name = "fabs", .linkage = common.linkage });
+ @export(__fabsx, .{ .name = "__fabsx", .linkage = common.linkage });
+ const fabsq_sym_name = if (common.want_ppc_abi) "fabsf128" else "fabsq";
+ @export(fabsq, .{ .name = fabsq_sym_name, .linkage = common.linkage });
+ @export(fabsl, .{ .name = "fabsl", .linkage = common.linkage });
+}
pub fn __fabsh(a: f16) callconv(.C) f16 {
return generic_fabs(a);
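
Note: every exported fabs variant above funnels into generic_fabs (body not shown in this hunk). The usual bit-level definition of fabs is to clear the sign bit of the representation; a minimal sketch of that idea, with a hypothetical helper that is not the upstream generic_fabs:

    const std = @import("std");

    fn fabsSketch(comptime T: type, a: T) T {
        const U = std.meta.Int(.unsigned, @bitSizeOf(T));
        const sign_mask = @as(U, 1) << (@bitSizeOf(T) - 1);
        return @bitCast(T, @bitCast(U, a) & ~sign_mask);
    }

    test "clearing the sign bit" {
        try std.testing.expect(fabsSketch(f32, -1.25) == 1.25);
        try std.testing.expect(fabsSketch(f64, -0.0) == 0.0);
    }
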
diff --git a/lib/compiler_rt/fixXfYi.zig b/lib/compiler_rt/fixXfYi.zig
deleted file mode 100644
index 01832ec56f..0000000000
--- a/lib/compiler_rt/fixXfYi.zig
+++ /dev/null
@@ -1,224 +0,0 @@
-const std = @import("std");
-const math = std.math;
-const Log2Int = math.Log2Int;
-const is_test = @import("builtin").is_test;
-
-pub inline fn fixXfYi(comptime I: type, a: anytype) I {
- @setRuntimeSafety(is_test);
-
- const F = @TypeOf(a);
- const float_bits = @typeInfo(F).Float.bits;
- const int_bits = @typeInfo(I).Int.bits;
- const rep_t = std.meta.Int(.unsigned, float_bits);
- const sig_bits = math.floatMantissaBits(F);
- const exp_bits = math.floatExponentBits(F);
- const fractional_bits = math.floatFractionalBits(F);
-
- const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
- const max_exp = (1 << (exp_bits - 1));
- const exp_bias = max_exp - 1;
- const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
-
- // Break a into sign, exponent, significand
- const a_rep: rep_t = @bitCast(rep_t, a);
- const negative = (a_rep >> (float_bits - 1)) != 0;
- const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
- const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
-
- // If the exponent is negative, the result rounds to zero.
- if (exponent < 0) return 0;
-
- // If the value is too large for the integer type, saturate.
- switch (@typeInfo(I).Int.signedness) {
- .unsigned => {
- if (negative) return 0;
- if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
- },
- .signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
- return if (negative) math.minInt(I) else math.maxInt(I);
- },
- }
-
- // If 0 <= exponent < sig_bits, right shift to get the result.
- // Otherwise, shift left.
- var result: I = undefined;
- if (exponent < fractional_bits) {
- result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
- } else {
- result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
- }
-
- if ((@typeInfo(I).Int.signedness == .signed) and negative)
- return ~result +% 1;
- return result;
-}
-
-// Conversion from f16
-
-pub fn __fixhfsi(a: f16) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunshfsi(a: f16) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixhfdi(a: f16) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunshfdi(a: f16) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixhfti(a: f16) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunshfti(a: f16) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f32
-
-pub fn __fixsfsi(a: f32) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixsfdi(a: f32) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixsfti(a: f32) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunssfti(a: f32) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f64
-
-pub fn __fixdfsi(a: f64) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixdfdi(a: f64) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixdfti(a: f64) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f80
-
-pub fn __fixxfsi(a: f80) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunsxfsi(a: f80) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixxfdi(a: f80) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunsxfdi(a: f80) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixxfti(a: f80) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f128
-
-pub fn __fixtfsi(a: f128) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixtfdi(a: f128) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixtfti(a: f128) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunstfti(a: f128) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f32
-
-pub fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
- return fixXfYi(u64, a);
-}
-
-// Conversion from f64
-
-pub fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
- return fixXfYi(u64, a);
-}
-
-test {
- _ = @import("fixXfYi_test.zig");
-}
diff --git a/lib/compiler_rt/fixdfdi.zig b/lib/compiler_rt/fixdfdi.zig
new file mode 100644
index 0000000000..5935f23524
--- /dev/null
+++ b/lib/compiler_rt/fixdfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = common.linkage });
+ } else {
+ @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixdfdi(a: f64) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixdfsi.zig b/lib/compiler_rt/fixdfsi.zig
new file mode 100644
index 0000000000..983c84ccb1
--- /dev/null
+++ b/lib/compiler_rt/fixdfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = common.linkage });
+ } else {
+ @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixdfsi(a: f64) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixdfti.zig b/lib/compiler_rt/fixdfti.zig
new file mode 100644
index 0000000000..b2476ce2f3
--- /dev/null
+++ b/lib/compiler_rt/fixdfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage });
+}
+
+pub fn __fixdfti(a: f64) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/fixhfdi.zig b/lib/compiler_rt/fixhfdi.zig
new file mode 100644
index 0000000000..28e871f495
--- /dev/null
+++ b/lib/compiler_rt/fixhfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage });
+}
+
+fn __fixhfdi(a: f16) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixhfsi.zig b/lib/compiler_rt/fixhfsi.zig
new file mode 100644
index 0000000000..23440eea22
--- /dev/null
+++ b/lib/compiler_rt/fixhfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage });
+}
+
+fn __fixhfsi(a: f16) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixhfti.zig b/lib/compiler_rt/fixhfti.zig
new file mode 100644
index 0000000000..36fc1bf607
--- /dev/null
+++ b/lib/compiler_rt/fixhfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage });
+}
+
+fn __fixhfti(a: f16) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/fixsfdi.zig b/lib/compiler_rt/fixsfdi.zig
new file mode 100644
index 0000000000..0c4fb7f3f6
--- /dev/null
+++ b/lib/compiler_rt/fixsfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = common.linkage });
+ } else {
+ @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixsfdi(a: f32) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixsfsi.zig b/lib/compiler_rt/fixsfsi.zig
new file mode 100644
index 0000000000..f48e354cd2
--- /dev/null
+++ b/lib/compiler_rt/fixsfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = common.linkage });
+ } else {
+ @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixsfsi(a: f32) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixsfti.zig b/lib/compiler_rt/fixsfti.zig
new file mode 100644
index 0000000000..4bf68ec8b0
--- /dev/null
+++ b/lib/compiler_rt/fixsfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage });
+}
+
+pub fn __fixsfti(a: f32) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/fixtfdi.zig b/lib/compiler_rt/fixtfdi.zig
new file mode 100644
index 0000000000..9cc9835352
--- /dev/null
+++ b/lib/compiler_rt/fixtfdi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixkfdi, .{ .name = "__fixkfdi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = common.linkage });
+ } else {
+ @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixtfdi(a: f128) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __fixkfdi(a: f128) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn _Qp_qtox(a: *const f128) callconv(.C) i64 {
+ return floatToInt(i64, a.*);
+}
diff --git a/lib/compiler_rt/fixtfsi.zig b/lib/compiler_rt/fixtfsi.zig
new file mode 100644
index 0000000000..f46208f02b
--- /dev/null
+++ b/lib/compiler_rt/fixtfsi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixkfsi, .{ .name = "__fixkfsi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = common.linkage });
+ } else {
+ @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixtfsi(a: f128) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __fixkfsi(a: f128) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn _Qp_qtoi(a: *const f128) callconv(.C) i32 {
+ return floatToInt(i32, a.*);
+}
diff --git a/lib/compiler_rt/fixtfti.zig b/lib/compiler_rt/fixtfti.zig
new file mode 100644
index 0000000000..9ba761729e
--- /dev/null
+++ b/lib/compiler_rt/fixtfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage });
+}
+
+pub fn __fixtfti(a: f128) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/fixunsdfdi.zig b/lib/compiler_rt/fixunsdfdi.zig
new file mode 100644
index 0000000000..edc0806405
--- /dev/null
+++ b/lib/compiler_rt/fixunsdfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = common.linkage });
+ } else {
+ @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunsdfsi.zig b/lib/compiler_rt/fixunsdfsi.zig
new file mode 100644
index 0000000000..cc413f3983
--- /dev/null
+++ b/lib/compiler_rt/fixunsdfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = common.linkage });
+ } else {
+ @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunsdfti.zig b/lib/compiler_rt/fixunsdfti.zig
new file mode 100644
index 0000000000..ce3c4aabdd
--- /dev/null
+++ b/lib/compiler_rt/fixunsdfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage });
+}
+
+pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunshfdi.zig b/lib/compiler_rt/fixunshfdi.zig
new file mode 100644
index 0000000000..5058bc5e68
--- /dev/null
+++ b/lib/compiler_rt/fixunshfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage });
+}
+
+fn __fixunshfdi(a: f16) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunshfsi.zig b/lib/compiler_rt/fixunshfsi.zig
new file mode 100644
index 0000000000..5755048814
--- /dev/null
+++ b/lib/compiler_rt/fixunshfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage });
+}
+
+fn __fixunshfsi(a: f16) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunshfti.zig b/lib/compiler_rt/fixunshfti.zig
new file mode 100644
index 0000000000..b804c52f96
--- /dev/null
+++ b/lib/compiler_rt/fixunshfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage });
+}
+
+pub fn __fixunshfti(a: f16) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunssfdi.zig b/lib/compiler_rt/fixunssfdi.zig
new file mode 100644
index 0000000000..544dfcd97e
--- /dev/null
+++ b/lib/compiler_rt/fixunssfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = common.linkage });
+ } else {
+ @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunssfsi.zig b/lib/compiler_rt/fixunssfsi.zig
new file mode 100644
index 0000000000..24b1e86694
--- /dev/null
+++ b/lib/compiler_rt/fixunssfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = common.linkage });
+ } else {
+ @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunssfti.zig b/lib/compiler_rt/fixunssfti.zig
new file mode 100644
index 0000000000..7b1965b5ab
--- /dev/null
+++ b/lib/compiler_rt/fixunssfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage });
+}
+
+pub fn __fixunssfti(a: f32) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunstfdi.zig b/lib/compiler_rt/fixunstfdi.zig
new file mode 100644
index 0000000000..0657bf20c1
--- /dev/null
+++ b/lib/compiler_rt/fixunstfdi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixunskfdi, .{ .name = "__fixunskfdi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = common.linkage });
+ } else {
+ @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __fixunskfdi(a: f128) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn _Qp_qtoux(a: *const f128) callconv(.C) u64 {
+ return floatToInt(u64, a.*);
+}
diff --git a/lib/compiler_rt/fixunstfsi.zig b/lib/compiler_rt/fixunstfsi.zig
new file mode 100644
index 0000000000..70725ddf38
--- /dev/null
+++ b/lib/compiler_rt/fixunstfsi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixunskfsi, .{ .name = "__fixunskfsi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = common.linkage });
+ } else {
+ @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __fixunskfsi(a: f128) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn _Qp_qtoui(a: *const f128) callconv(.C) u32 {
+ return floatToInt(u32, a.*);
+}
diff --git a/lib/compiler_rt/fixunstfti.zig b/lib/compiler_rt/fixunstfti.zig
new file mode 100644
index 0000000000..5e39db1065
--- /dev/null
+++ b/lib/compiler_rt/fixunstfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage });
+}
+
+pub fn __fixunstfti(a: f128) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunsxfdi.zig b/lib/compiler_rt/fixunsxfdi.zig
new file mode 100644
index 0000000000..cb2760af4e
--- /dev/null
+++ b/lib/compiler_rt/fixunsxfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage });
+}
+
+fn __fixunsxfdi(a: f80) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunsxfsi.zig b/lib/compiler_rt/fixunsxfsi.zig
new file mode 100644
index 0000000000..bec36abbf4
--- /dev/null
+++ b/lib/compiler_rt/fixunsxfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage });
+}
+
+fn __fixunsxfsi(a: f80) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunsxfti.zig b/lib/compiler_rt/fixunsxfti.zig
new file mode 100644
index 0000000000..acd41469be
--- /dev/null
+++ b/lib/compiler_rt/fixunsxfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage });
+}
+
+pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixxfdi.zig b/lib/compiler_rt/fixxfdi.zig
new file mode 100644
index 0000000000..0f249e0a92
--- /dev/null
+++ b/lib/compiler_rt/fixxfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage });
+}
+
+fn __fixxfdi(a: f80) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixxfsi.zig b/lib/compiler_rt/fixxfsi.zig
new file mode 100644
index 0000000000..ac2158b7b8
--- /dev/null
+++ b/lib/compiler_rt/fixxfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage });
+}
+
+fn __fixxfsi(a: f80) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixxfti.zig b/lib/compiler_rt/fixxfti.zig
new file mode 100644
index 0000000000..fb547f4115
--- /dev/null
+++ b/lib/compiler_rt/fixxfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage });
+}
+
+fn __fixxfti(a: f80) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/floatXiYf.zig b/lib/compiler_rt/floatXiYf.zig
deleted file mode 100644
index 068413f715..0000000000
--- a/lib/compiler_rt/floatXiYf.zig
+++ /dev/null
@@ -1,222 +0,0 @@
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const std = @import("std");
-const math = std.math;
-const expect = std.testing.expect;
-
-pub fn floatXiYf(comptime T: type, x: anytype) T {
- @setRuntimeSafety(is_test);
-
- if (x == 0) return 0;
-
- // Various constants whose values follow from the type parameters.
- // Any reasonable optimizer will fold and propagate all of these.
- const Z = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(x)));
- const uT = std.meta.Int(.unsigned, @bitSizeOf(T));
- const inf = math.inf(T);
- const float_bits = @bitSizeOf(T);
- const int_bits = @bitSizeOf(@TypeOf(x));
- const exp_bits = math.floatExponentBits(T);
- const fractional_bits = math.floatFractionalBits(T);
- const exp_bias = math.maxInt(std.meta.Int(.unsigned, exp_bits - 1));
- const implicit_bit = if (T != f80) @as(uT, 1) << fractional_bits else 0;
- const max_exp = exp_bias;
-
- // Sign
- var abs_val = math.absCast(x);
- const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
- var result: uT = sign_bit;
-
- // Compute significand
- var exp = int_bits - @clz(Z, abs_val) - 1;
- if (int_bits <= fractional_bits or exp <= fractional_bits) {
- const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
-
- // Shift up result to line up with the significand - no rounding required
- result = (@intCast(uT, abs_val) << shift_amt);
- result ^= implicit_bit; // Remove implicit integer bit
- } else {
- var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
- const exact_tie: bool = @ctz(Z, abs_val) == shift_amt - 1;
-
- // Shift down result and remove implicit integer bit
- result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);
-
- // Round result, including round-to-even for exact ties
- result = ((result + 1) >> 1) & ~@as(uT, @boolToInt(exact_tie));
- }
-
- // Compute exponent
- if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity
- return @bitCast(T, sign_bit | @bitCast(uT, inf));
-
- result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T);
-
- // If the result included a carry, we need to restore the explicit integer bit
- if (T == f80) result |= 1 << fractional_bits;
-
- return @bitCast(T, sign_bit | result);
-}
-
-// Conversion to f16
-pub fn __floatsihf(a: i32) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatunsihf(a: u32) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatdihf(a: i64) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatundihf(a: u64) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floattihf(a: i128) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatuntihf(a: u128) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-// Conversion to f32
-pub fn __floatsisf(a: i32) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatunsisf(a: u32) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatdisf(a: i64) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatundisf(a: u64) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floattisf(a: i128) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatuntisf(a: u128) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-// Conversion to f64
-pub fn __floatsidf(a: i32) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatunsidf(a: u32) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatdidf(a: i64) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatundidf(a: u64) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floattidf(a: i128) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatuntidf(a: u128) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-// Conversion to f80
-pub fn __floatsixf(a: i32) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatunsixf(a: u32) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatdixf(a: i64) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatundixf(a: u64) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floattixf(a: i128) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatuntixf(a: u128) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-// Conversion to f128
-pub fn __floatsitf(a: i32) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatunsitf(a: u32) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatditf(a: i64) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatunditf(a: u64) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floattitf(a: i128) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatuntitf(a: u128) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-// Conversion to f32
-pub fn __aeabi_ui2f(arg: u32) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-pub fn __aeabi_i2f(arg: i32) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-pub fn __aeabi_ul2f(arg: u64) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-pub fn __aeabi_l2f(arg: i64) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-// Conversion to f64
-pub fn __aeabi_ui2d(arg: u32) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-pub fn __aeabi_i2d(arg: i32) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-pub fn __aeabi_ul2d(arg: u64) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-pub fn __aeabi_l2d(arg: i64) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-test {
- _ = @import("floatXiYf_test.zig");
-}
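
Note: the removed floatXiYf rounded inexact integer-to-float conversions to nearest, ties to even, and the per-file float*.zig wrappers above route through int_to_float.zig, which is expected to keep that behavior. An illustrative test of the semantics (not part of this change):

    const std = @import("std");

    test "int to float rounds ties to even" {
        // 2^24 + 1 sits exactly halfway between two adjacent f32 values;
        // round-to-nearest-even picks 2^24.
        var x: u32 = (1 << 24) + 1;
        try std.testing.expect(@intToFloat(f32, x) == 16777216.0);
    }
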
diff --git a/lib/compiler_rt/float_to_int.zig b/lib/compiler_rt/float_to_int.zig
new file mode 100644
index 0000000000..49d41be442
--- /dev/null
+++ b/lib/compiler_rt/float_to_int.zig
@@ -0,0 +1,55 @@
+const Int = @import("std").meta.Int;
+const math = @import("std").math;
+const Log2Int = math.Log2Int;
+
+pub inline fn floatToInt(comptime I: type, a: anytype) I {
+ const F = @TypeOf(a);
+ const float_bits = @typeInfo(F).Float.bits;
+ const int_bits = @typeInfo(I).Int.bits;
+ const rep_t = Int(.unsigned, float_bits);
+ const sig_bits = math.floatMantissaBits(F);
+ const exp_bits = math.floatExponentBits(F);
+ const fractional_bits = math.floatFractionalBits(F);
+
+ const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
+ const max_exp = (1 << (exp_bits - 1));
+ const exp_bias = max_exp - 1;
+ const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
+
+ // Break a into sign, exponent, significand
+ const a_rep: rep_t = @bitCast(rep_t, a);
+ const negative = (a_rep >> (float_bits - 1)) != 0;
+ const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
+ const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
+
+ // If the exponent is negative, the result rounds to zero.
+ if (exponent < 0) return 0;
+
+ // If the value is too large for the integer type, saturate.
+ switch (@typeInfo(I).Int.signedness) {
+ .unsigned => {
+ if (negative) return 0;
+ if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
+ },
+ .signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
+ return if (negative) math.minInt(I) else math.maxInt(I);
+ },
+ }
+
+ // If 0 <= exponent < sig_bits, right shift to get the result.
+ // Otherwise, shift left.
+ var result: I = undefined;
+ if (exponent < fractional_bits) {
+ result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
+ } else {
+ result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
+ }
+
+ if ((@typeInfo(I).Int.signedness == .signed) and negative)
+ return ~result +% 1;
+ return result;
+}
+
+test {
+ _ = @import("float_to_int_test.zig");
+}
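
Note: floatToInt keeps the saturating semantics of the old fixXfYi: the fractional part is truncated, out-of-range values clamp to the integer type's bounds, and negative inputs to unsigned types clamp to zero. Illustrative tests (not part of this change; they assume compilation inside lib/compiler_rt so the relative import resolves):

    const std = @import("std");
    const floatToInt = @import("float_to_int.zig").floatToInt;

    test "float to int truncates and saturates" {
        try std.testing.expect(floatToInt(i32, @as(f64, 3.99)) == 3);
        try std.testing.expect(floatToInt(u32, @as(f64, -1.0)) == 0);
        try std.testing.expect(floatToInt(i32, @as(f64, 1.0e30)) == std.math.maxInt(i32));
    }
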
diff --git a/lib/compiler_rt/fixXfYi_test.zig b/lib/compiler_rt/float_to_int_test.zig
index 00ed455609..676c12e914 100644
--- a/lib/compiler_rt/fixXfYi_test.zig
+++ b/lib/compiler_rt/float_to_int_test.zig
@@ -1,31 +1,33 @@
const std = @import("std");
const testing = std.testing;
const math = std.math;
-const fixXfYi = @import("fixXfYi.zig").fixXfYi;
+
+const __fixunshfti = @import("fixunshfti.zig").__fixunshfti;
+const __fixunsxfti = @import("fixunsxfti.zig").__fixunsxfti;
// Conversion from f32
-const __fixsfsi = @import("fixXfYi.zig").__fixsfsi;
-const __fixunssfsi = @import("fixXfYi.zig").__fixunssfsi;
-const __fixsfdi = @import("fixXfYi.zig").__fixsfdi;
-const __fixunssfdi = @import("fixXfYi.zig").__fixunssfdi;
-const __fixsfti = @import("fixXfYi.zig").__fixsfti;
-const __fixunssfti = @import("fixXfYi.zig").__fixunssfti;
+const __fixsfsi = @import("fixsfsi.zig").__fixsfsi;
+const __fixunssfsi = @import("fixunssfsi.zig").__fixunssfsi;
+const __fixsfdi = @import("fixsfdi.zig").__fixsfdi;
+const __fixunssfdi = @import("fixunssfdi.zig").__fixunssfdi;
+const __fixsfti = @import("fixsfti.zig").__fixsfti;
+const __fixunssfti = @import("fixunssfti.zig").__fixunssfti;
// Conversion from f64
-const __fixdfsi = @import("fixXfYi.zig").__fixdfsi;
-const __fixunsdfsi = @import("fixXfYi.zig").__fixunsdfsi;
-const __fixdfdi = @import("fixXfYi.zig").__fixdfdi;
-const __fixunsdfdi = @import("fixXfYi.zig").__fixunsdfdi;
-const __fixdfti = @import("fixXfYi.zig").__fixdfti;
-const __fixunsdfti = @import("fixXfYi.zig").__fixunsdfti;
+const __fixdfsi = @import("fixdfsi.zig").__fixdfsi;
+const __fixunsdfsi = @import("fixunsdfsi.zig").__fixunsdfsi;
+const __fixdfdi = @import("fixdfdi.zig").__fixdfdi;
+const __fixunsdfdi = @import("fixunsdfdi.zig").__fixunsdfdi;
+const __fixdfti = @import("fixdfti.zig").__fixdfti;
+const __fixunsdfti = @import("fixunsdfti.zig").__fixunsdfti;
// Conversion from f128
-const __fixtfsi = @import("fixXfYi.zig").__fixtfsi;
-const __fixunstfsi = @import("fixXfYi.zig").__fixunstfsi;
-const __fixtfdi = @import("fixXfYi.zig").__fixtfdi;
-const __fixunstfdi = @import("fixXfYi.zig").__fixunstfdi;
-const __fixtfti = @import("fixXfYi.zig").__fixtfti;
-const __fixunstfti = @import("fixXfYi.zig").__fixunstfti;
+const __fixtfsi = @import("fixtfsi.zig").__fixtfsi;
+const __fixunstfsi = @import("fixunstfsi.zig").__fixunstfsi;
+const __fixtfdi = @import("fixtfdi.zig").__fixtfdi;
+const __fixunstfdi = @import("fixunstfdi.zig").__fixunstfdi;
+const __fixtfti = @import("fixtfti.zig").__fixtfti;
+const __fixunstfti = @import("fixunstfti.zig").__fixunstfti;
fn test__fixsfsi(a: f32, expected: i32) !void {
const x = __fixsfsi(a);
@@ -927,21 +929,21 @@ test "fixunstfti" {
}
fn test__fixunshfti(a: f16, expected: u128) !void {
- const x = fixXfYi(u128, a);
+ const x = __fixunshfti(a);
try testing.expect(x == expected);
}
-test "fixXfYi for f16" {
+test "fixunshfti for f16" {
try test__fixunshfti(math.inf(f16), math.maxInt(u128));
try test__fixunshfti(math.floatMax(f16), 65504);
}
fn test__fixunsxfti(a: f80, expected: u128) !void {
- const x = fixXfYi(u128, a);
+ const x = __fixunsxfti(a);
try testing.expect(x == expected);
}
-test "fixXfYi for f80" {
+test "fixunsxfti for f80" {
try test__fixunsxfti(math.inf(f80), math.maxInt(u128));
try test__fixunsxfti(math.floatMax(f80), math.maxInt(u128));
try test__fixunsxfti(math.maxInt(u64), math.maxInt(u64));
diff --git a/lib/compiler_rt/floatdidf.zig b/lib/compiler_rt/floatdidf.zig
new file mode 100644
index 0000000000..9117e2189d
--- /dev/null
+++ b/lib/compiler_rt/floatdidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = common.linkage });
+ } else {
+ @export(__floatdidf, .{ .name = "__floatdidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatdidf(a: i64) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_l2d(a: i64) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatdihf.zig b/lib/compiler_rt/floatdihf.zig
new file mode 100644
index 0000000000..f2f7236d6f
--- /dev/null
+++ b/lib/compiler_rt/floatdihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage });
+}
+
+fn __floatdihf(a: i64) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatdisf.zig b/lib/compiler_rt/floatdisf.zig
new file mode 100644
index 0000000000..3de94c5103
--- /dev/null
+++ b/lib/compiler_rt/floatdisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = common.linkage });
+ } else {
+ @export(__floatdisf, .{ .name = "__floatdisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatdisf(a: i64) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_l2f(a: i64) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatditf.zig b/lib/compiler_rt/floatditf.zig
new file mode 100644
index 0000000000..731c6d8d86
--- /dev/null
+++ b/lib/compiler_rt/floatditf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatdikf, .{ .name = "__floatdikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = common.linkage });
+ } else {
+ @export(__floatditf, .{ .name = "__floatditf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatditf(a: i64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatdikf(a: i64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_xtoq(c: *f128, a: i64) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatdixf.zig b/lib/compiler_rt/floatdixf.zig
new file mode 100644
index 0000000000..7d80fdbeb8
--- /dev/null
+++ b/lib/compiler_rt/floatdixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage });
+}
+
+fn __floatdixf(a: i64) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatsidf.zig b/lib/compiler_rt/floatsidf.zig
new file mode 100644
index 0000000000..e31c2616fd
--- /dev/null
+++ b/lib/compiler_rt/floatsidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = common.linkage });
+ } else {
+ @export(__floatsidf, .{ .name = "__floatsidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatsidf(a: i32) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_i2d(a: i32) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatsihf.zig b/lib/compiler_rt/floatsihf.zig
new file mode 100644
index 0000000000..84b54298b5
--- /dev/null
+++ b/lib/compiler_rt/floatsihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage });
+}
+
+fn __floatsihf(a: i32) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatsisf.zig b/lib/compiler_rt/floatsisf.zig
new file mode 100644
index 0000000000..87f83315c1
--- /dev/null
+++ b/lib/compiler_rt/floatsisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = common.linkage });
+ } else {
+ @export(__floatsisf, .{ .name = "__floatsisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatsisf(a: i32) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_i2f(a: i32) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatsitf.zig b/lib/compiler_rt/floatsitf.zig
new file mode 100644
index 0000000000..0954199170
--- /dev/null
+++ b/lib/compiler_rt/floatsitf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatsikf, .{ .name = "__floatsikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = common.linkage });
+ } else {
+ @export(__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatsitf(a: i32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatsikf(a: i32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_itoq(c: *f128, a: i32) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatsixf.zig b/lib/compiler_rt/floatsixf.zig
new file mode 100644
index 0000000000..76d266e17a
--- /dev/null
+++ b/lib/compiler_rt/floatsixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatsixf, .{ .name = "__floatsixf", .linkage = common.linkage });
+}
+
+fn __floatsixf(a: i32) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floattidf.zig b/lib/compiler_rt/floattidf.zig
new file mode 100644
index 0000000000..1f1ac2f2ef
--- /dev/null
+++ b/lib/compiler_rt/floattidf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage });
+}
+
+pub fn __floattidf(a: i128) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floattihf.zig b/lib/compiler_rt/floattihf.zig
new file mode 100644
index 0000000000..c7e45c7d53
--- /dev/null
+++ b/lib/compiler_rt/floattihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage });
+}
+
+fn __floattihf(a: i128) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floattisf.zig b/lib/compiler_rt/floattisf.zig
new file mode 100644
index 0000000000..5eb493d09b
--- /dev/null
+++ b/lib/compiler_rt/floattisf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage });
+}
+
+pub fn __floattisf(a: i128) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floattitf.zig b/lib/compiler_rt/floattitf.zig
new file mode 100644
index 0000000000..0764c2d2c2
--- /dev/null
+++ b/lib/compiler_rt/floattitf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage });
+}
+
+pub fn __floattitf(a: i128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floattixf.zig b/lib/compiler_rt/floattixf.zig
new file mode 100644
index 0000000000..def9bef4d5
--- /dev/null
+++ b/lib/compiler_rt/floattixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage });
+}
+
+fn __floattixf(a: i128) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatundidf.zig b/lib/compiler_rt/floatundidf.zig
new file mode 100644
index 0000000000..d49575639e
--- /dev/null
+++ b/lib/compiler_rt/floatundidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = common.linkage });
+ } else {
+ @export(__floatundidf, .{ .name = "__floatundidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatundidf(a: u64) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_ul2d(a: u64) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatundihf.zig b/lib/compiler_rt/floatundihf.zig
new file mode 100644
index 0000000000..6eff8aaec3
--- /dev/null
+++ b/lib/compiler_rt/floatundihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatundihf, .{ .name = "__floatundihf", .linkage = common.linkage });
+}
+
+fn __floatundihf(a: u64) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatundisf.zig b/lib/compiler_rt/floatundisf.zig
new file mode 100644
index 0000000000..963670d85b
--- /dev/null
+++ b/lib/compiler_rt/floatundisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = common.linkage });
+ } else {
+ @export(__floatundisf, .{ .name = "__floatundisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatundisf(a: u64) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_ul2f(a: u64) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatunditf.zig b/lib/compiler_rt/floatunditf.zig
new file mode 100644
index 0000000000..1eda21891d
--- /dev/null
+++ b/lib/compiler_rt/floatunditf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatundikf, .{ .name = "__floatundikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = common.linkage });
+ } else {
+ @export(__floatunditf, .{ .name = "__floatunditf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunditf(a: u64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatundikf(a: u64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_uxtoq(c: *f128, a: u64) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatundixf.zig b/lib/compiler_rt/floatundixf.zig
new file mode 100644
index 0000000000..331b74df4f
--- /dev/null
+++ b/lib/compiler_rt/floatundixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatundixf, .{ .name = "__floatundixf", .linkage = common.linkage });
+}
+
+fn __floatundixf(a: u64) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatunsidf.zig b/lib/compiler_rt/floatunsidf.zig
new file mode 100644
index 0000000000..1f5a47287a
--- /dev/null
+++ b/lib/compiler_rt/floatunsidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = common.linkage });
+ } else {
+ @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunsidf(a: u32) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_ui2d(a: u32) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatunsihf.zig b/lib/compiler_rt/floatunsihf.zig
new file mode 100644
index 0000000000..b2f679c18c
--- /dev/null
+++ b/lib/compiler_rt/floatunsihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatunsihf, .{ .name = "__floatunsihf", .linkage = common.linkage });
+}
+
+pub fn __floatunsihf(a: u32) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatunsisf.zig b/lib/compiler_rt/floatunsisf.zig
new file mode 100644
index 0000000000..46f336a4d8
--- /dev/null
+++ b/lib/compiler_rt/floatunsisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = common.linkage });
+ } else {
+ @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunsisf(a: u32) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_ui2f(a: u32) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatunsitf.zig b/lib/compiler_rt/floatunsitf.zig
new file mode 100644
index 0000000000..bee656c801
--- /dev/null
+++ b/lib/compiler_rt/floatunsitf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatunsikf, .{ .name = "__floatunsikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = common.linkage });
+ } else {
+ @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunsitf(a: u32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatunsikf(a: u32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_uitoq(c: *f128, a: u32) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatunsixf.zig b/lib/compiler_rt/floatunsixf.zig
new file mode 100644
index 0000000000..40492564fc
--- /dev/null
+++ b/lib/compiler_rt/floatunsixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatunsixf, .{ .name = "__floatunsixf", .linkage = common.linkage });
+}
+
+fn __floatunsixf(a: u32) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatuntidf.zig b/lib/compiler_rt/floatuntidf.zig
new file mode 100644
index 0000000000..a77a952fe9
--- /dev/null
+++ b/lib/compiler_rt/floatuntidf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage });
+}
+
+pub fn __floatuntidf(a: u128) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatuntihf.zig b/lib/compiler_rt/floatuntihf.zig
new file mode 100644
index 0000000000..0263b1da98
--- /dev/null
+++ b/lib/compiler_rt/floatuntihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage });
+}
+
+fn __floatuntihf(a: u128) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatuntisf.zig b/lib/compiler_rt/floatuntisf.zig
new file mode 100644
index 0000000000..3edf636987
--- /dev/null
+++ b/lib/compiler_rt/floatuntisf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage });
+}
+
+pub fn __floatuntisf(a: u128) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatuntitf.zig b/lib/compiler_rt/floatuntitf.zig
new file mode 100644
index 0000000000..1a755cccdb
--- /dev/null
+++ b/lib/compiler_rt/floatuntitf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatuntikf, .{ .name = "__floatuntikf", .linkage = common.linkage });
+ } else {
+ @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatuntitf(a: u128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatuntikf(a: u128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatuntixf.zig b/lib/compiler_rt/floatuntixf.zig
new file mode 100644
index 0000000000..07017d1f57
--- /dev/null
+++ b/lib/compiler_rt/floatuntixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage });
+}
+
+pub fn __floatuntixf(a: u128) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floor.zig b/lib/compiler_rt/floor.zig
index 783898fca7..ef02786eb4 100644
--- a/lib/compiler_rt/floor.zig
+++ b/lib/compiler_rt/floor.zig
@@ -1,12 +1,27 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/floorf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/floor.c
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/floorf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/floor.c
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const expect = std.testing.expect;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floorh, .{ .name = "__floorh", .linkage = common.linkage });
+ @export(floorf, .{ .name = "floorf", .linkage = common.linkage });
+ @export(floor, .{ .name = "floor", .linkage = common.linkage });
+ @export(__floorx, .{ .name = "__floorx", .linkage = common.linkage });
+ const floorq_sym_name = if (common.want_ppc_abi) "floorf128" else "floorq";
+ @export(floorq, .{ .name = floorq_sym_name, .linkage = common.linkage });
+ @export(floorl, .{ .name = "floorl", .linkage = common.linkage });
+}
pub fn __floorh(x: f16) callconv(.C) f16 {
var u = @bitCast(u16, x);
diff --git a/lib/compiler_rt/fma.zig b/lib/compiler_rt/fma.zig
index b121db212c..aa37276ac3 100644
--- a/lib/compiler_rt/fma.zig
+++ b/lib/compiler_rt/fma.zig
@@ -1,13 +1,28 @@
-// Ported from musl, which is MIT licensed:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/fmal.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/fmaf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/fma.c
+//! Ported from musl, which is MIT licensed:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/fmal.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/fmaf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/fma.c
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const expect = std.testing.expect;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fmah, .{ .name = "__fmah", .linkage = common.linkage });
+ @export(fmaf, .{ .name = "fmaf", .linkage = common.linkage });
+ @export(fma, .{ .name = "fma", .linkage = common.linkage });
+ @export(__fmax, .{ .name = "__fmax", .linkage = common.linkage });
+ const fmaq_sym_name = if (common.want_ppc_abi) "fmaf128" else "fmaq";
+ @export(fmaq, .{ .name = fmaq_sym_name, .linkage = common.linkage });
+ @export(fmal, .{ .name = "fmal", .linkage = common.linkage });
+}
pub fn __fmah(x: f16, y: f16, z: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/fmax.zig b/lib/compiler_rt/fmax.zig
index defc935afc..5fb87e0183 100644
--- a/lib/compiler_rt/fmax.zig
+++ b/lib/compiler_rt/fmax.zig
@@ -1,5 +1,20 @@
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fmaxh, .{ .name = "__fmaxh", .linkage = common.linkage });
+ @export(fmaxf, .{ .name = "fmaxf", .linkage = common.linkage });
+ @export(fmax, .{ .name = "fmax", .linkage = common.linkage });
+ @export(__fmaxx, .{ .name = "__fmaxx", .linkage = common.linkage });
+ const fmaxq_sym_name = if (common.want_ppc_abi) "fmaxf128" else "fmaxq";
+ @export(fmaxq, .{ .name = fmaxq_sym_name, .linkage = common.linkage });
+ @export(fmaxl, .{ .name = "fmaxl", .linkage = common.linkage });
+}
pub fn __fmaxh(x: f16, y: f16) callconv(.C) f16 {
return generic_fmax(f16, x, y);
diff --git a/lib/compiler_rt/fmin.zig b/lib/compiler_rt/fmin.zig
index e93300bd4b..cc2fd7b3ac 100644
--- a/lib/compiler_rt/fmin.zig
+++ b/lib/compiler_rt/fmin.zig
@@ -1,5 +1,20 @@
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fminh, .{ .name = "__fminh", .linkage = common.linkage });
+ @export(fminf, .{ .name = "fminf", .linkage = common.linkage });
+ @export(fmin, .{ .name = "fmin", .linkage = common.linkage });
+ @export(__fminx, .{ .name = "__fminx", .linkage = common.linkage });
+ const fminq_sym_name = if (common.want_ppc_abi) "fminf128" else "fminq";
+ @export(fminq, .{ .name = fminq_sym_name, .linkage = common.linkage });
+ @export(fminl, .{ .name = "fminl", .linkage = common.linkage });
+}
pub fn __fminh(x: f16, y: f16) callconv(.C) f16 {
return generic_fmin(f16, x, y);
diff --git a/lib/compiler_rt/fmod.zig b/lib/compiler_rt/fmod.zig
index 5d413ca37d..22b20438cc 100644
--- a/lib/compiler_rt/fmod.zig
+++ b/lib/compiler_rt/fmod.zig
@@ -2,7 +2,21 @@ const builtin = @import("builtin");
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
-const normalize = @import("divdf3.zig").normalize;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+const normalize = common.normalize;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fmodh, .{ .name = "__fmodh", .linkage = common.linkage });
+ @export(fmodf, .{ .name = "fmodf", .linkage = common.linkage });
+ @export(fmod, .{ .name = "fmod", .linkage = common.linkage });
+ @export(__fmodx, .{ .name = "__fmodx", .linkage = common.linkage });
+ const fmodq_sym_name = if (common.want_ppc_abi) "fmodf128" else "fmodq";
+ @export(fmodq, .{ .name = fmodq_sym_name, .linkage = common.linkage });
+ @export(fmodl, .{ .name = "fmodl", .linkage = common.linkage });
+}
pub fn __fmodh(x: f16, y: f16) callconv(.C) f16 {
// TODO: more efficient implementation
@@ -20,8 +34,6 @@ pub fn fmod(x: f64, y: f64) callconv(.C) f64 {
/// fmodx - floating modulo large, returns the remainder of division for f80 types
/// Logic and flow heavily inspired by MUSL fmodl for 113 mantissa digits
pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
- @setRuntimeSafety(builtin.is_test);
-
const T = f80;
const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
@@ -120,7 +132,6 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
/// fmodq - floating modulo large, returns the remainder of division for f128 types
/// Logic and flow heavily inspired by MUSL fmodl for 113 mantissa digits
pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
- @setRuntimeSafety(builtin.is_test);
var amod = a;
var bmod = b;
const aPtr_u64 = @ptrCast([*]u64, &amod);
@@ -249,8 +260,6 @@ pub fn fmodl(a: c_longdouble, b: c_longdouble) callconv(.C) c_longdouble {
}
inline fn generic_fmod(comptime T: type, x: T, y: T) T {
- @setRuntimeSafety(false);
-
const bits = @typeInfo(T).Float.bits;
const uint = std.meta.Int(.unsigned, bits);
const log2uint = math.Log2Int(uint);
diff --git a/lib/compiler_rt/gedf2.zig b/lib/compiler_rt/gedf2.zig
new file mode 100644
index 0000000000..684ba665b5
--- /dev/null
+++ b/lib/compiler_rt/gedf2.zig
@@ -0,0 +1,36 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = common.linkage });
+ @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = common.linkage });
+ } else {
+ @export(__gedf2, .{ .name = "__gedf2", .linkage = common.linkage });
+ @export(__gtdf2, .{ .name = "__gtdf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+pub fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f64, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __gedf2(a, b);
+}
+
+fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.GE, a, b) != .Less);
+}
+
+fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.GE, a, b) == .Greater);
+}
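
The doc comments above quote the GCC soft-float convention: the caller compares the returned value against zero, so __gtdf2 can share __gedf2's body because only the caller's test differs (>= 0 for ge, > 0 for gt), while a NaN operand produces a negative value that fails both tests. A sketch of how a call site typically consumes these results, for illustration only:

fn ge(a: f64, b: f64) bool {
    return __gedf2(a, b) >= 0; // false if either operand is NaN
}

fn gt(a: f64, b: f64) bool {
    return __gtdf2(a, b) > 0; // false if either operand is NaN
}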
diff --git a/lib/compiler_rt/gesf2.zig b/lib/compiler_rt/gesf2.zig
new file mode 100644
index 0000000000..3d455e52bf
--- /dev/null
+++ b/lib/compiler_rt/gesf2.zig
@@ -0,0 +1,36 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = common.linkage });
+ @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = common.linkage });
+ } else {
+ @export(__gesf2, .{ .name = "__gesf2", .linkage = common.linkage });
+ @export(__gtsf2, .{ .name = "__gtsf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+pub fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f32, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+pub fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __gesf2(a, b);
+}
+
+fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.GE, a, b) != .Less);
+}
+
+fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.GE, a, b) == .Greater);
+}
diff --git a/lib/compiler_rt/getf2.zig b/lib/compiler_rt/getf2.zig
new file mode 100644
index 0000000000..8d9d39c1f9
--- /dev/null
+++ b/lib/compiler_rt/getf2.zig
@@ -0,0 +1,39 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__gekf2, .{ .name = "__gekf2", .linkage = common.linkage });
+ @export(__gtkf2, .{ .name = "__gtkf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ // These exports are handled in cmptf2.zig because gt and ge on sparc
+ // are based on calling _Qp_cmp.
+ } else {
+ @export(__getf2, .{ .name = "__getf2", .linkage = common.linkage });
+ @export(__gttf2, .{ .name = "__gttf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+fn __getf2(a: f128, b: f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
+
+fn __gekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
+
+fn __gtkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
diff --git a/lib/compiler_rt/gexf2.zig b/lib/compiler_rt/gexf2.zig
new file mode 100644
index 0000000000..6bb88fbb8f
--- /dev/null
+++ b/lib/compiler_rt/gexf2.zig
@@ -0,0 +1,17 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__gexf2, .{ .name = "__gexf2", .linkage = common.linkage });
+ @export(__gtxf2, .{ .name = "__gtxf2", .linkage = common.linkage });
+}
+
+fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
+ return @enumToInt(comparef.cmp_f80(comparef.GE, a, b));
+}
+
+fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __gexf2(a, b);
+}
diff --git a/lib/compiler_rt/int.zig b/lib/compiler_rt/int.zig
index 0f3400d37e..53205e2ed9 100644
--- a/lib/compiler_rt/int.zig
+++ b/lib/compiler_rt/int.zig
@@ -1,22 +1,45 @@
-// Builtin functions that operate on integer types
+//! Builtin functions that operate on integer types
+
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
-
+const arch = builtin.cpu.arch;
+const is_test = builtin.is_test;
+const common = @import("common.zig");
const udivmod = @import("udivmod.zig").udivmod;
-pub fn __divmoddi4(a: i64, b: i64, rem: *i64) callconv(.C) i64 {
- @setRuntimeSafety(builtin.is_test);
+pub const panic = common.panic;
+
+comptime {
+ @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = common.linkage });
+ @export(__mulsi3, .{ .name = "__mulsi3", .linkage = common.linkage });
+ @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = common.linkage });
+ if (common.want_aeabi) {
+ @export(__aeabi_idiv, .{ .name = "__aeabi_idiv", .linkage = common.linkage });
+ @export(__aeabi_uidiv, .{ .name = "__aeabi_uidiv", .linkage = common.linkage });
+ } else {
+ @export(__divsi3, .{ .name = "__divsi3", .linkage = common.linkage });
+ @export(__udivsi3, .{ .name = "__udivsi3", .linkage = common.linkage });
+ }
+ @export(__divdi3, .{ .name = "__divdi3", .linkage = common.linkage });
+ @export(__udivdi3, .{ .name = "__udivdi3", .linkage = common.linkage });
+ @export(__modsi3, .{ .name = "__modsi3", .linkage = common.linkage });
+ @export(__moddi3, .{ .name = "__moddi3", .linkage = common.linkage });
+ @export(__umodsi3, .{ .name = "__umodsi3", .linkage = common.linkage });
+ @export(__umoddi3, .{ .name = "__umoddi3", .linkage = common.linkage });
+ @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = common.linkage });
+ @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = common.linkage });
+}
+pub fn __divmoddi4(a: i64, b: i64, rem: *i64) callconv(.C) i64 {
const d = __divdi3(a, b);
rem.* = a -% (d *% b);
return d;
}
pub fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) callconv(.C) u64 {
- @setRuntimeSafety(builtin.is_test);
return udivmod(u64, a, b, maybe_rem);
}
@@ -25,8 +48,6 @@ test "test_udivmoddi4" {
}
pub fn __divdi3(a: i64, b: i64) callconv(.C) i64 {
- @setRuntimeSafety(builtin.is_test);
-
// Set aside the sign of the quotient.
const sign = @bitCast(u64, (a ^ b) >> 63);
// Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63).
@@ -64,8 +85,6 @@ fn test_one_divdi3(a: i64, b: i64, expected_q: i64) !void {
}
pub fn __moddi3(a: i64, b: i64) callconv(.C) i64 {
- @setRuntimeSafety(builtin.is_test);
-
// Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63).
const abs_a = (a ^ (a >> 63)) -% (a >> 63);
const abs_b = (b ^ (b >> 63)) -% (b >> 63);
@@ -104,13 +123,10 @@ fn test_one_moddi3(a: i64, b: i64, expected_r: i64) !void {
}
pub fn __udivdi3(a: u64, b: u64) callconv(.C) u64 {
- @setRuntimeSafety(builtin.is_test);
return __udivmoddi4(a, b, null);
}
pub fn __umoddi3(a: u64, b: u64) callconv(.C) u64 {
- @setRuntimeSafety(builtin.is_test);
-
var r: u64 = undefined;
_ = __udivmoddi4(a, b, &r);
return r;
@@ -130,8 +146,6 @@ fn test_one_umoddi3(a: u64, b: u64, expected_r: u64) !void {
}
pub fn __divmodsi4(a: i32, b: i32, rem: *i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
const d = __divsi3(a, b);
rem.* = a -% (d * b);
return d;
@@ -166,16 +180,20 @@ fn test_one_divmodsi4(a: i32, b: i32, expected_q: i32, expected_r: i32) !void {
}
pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.C) u32 {
- @setRuntimeSafety(builtin.is_test);
-
const d = __udivsi3(a, b);
rem.* = @bitCast(u32, @bitCast(i32, a) -% (@bitCast(i32, d) * @bitCast(i32, b)));
return d;
}
pub fn __divsi3(n: i32, d: i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
+ return div_i32(n, d);
+}
+fn __aeabi_idiv(n: i32, d: i32) callconv(.AAPCS) i32 {
+ return div_i32(n, d);
+}
+
+inline fn div_i32(n: i32, d: i32) i32 {
// Set aside the sign of the quotient.
const sign = @bitCast(u32, (n ^ d) >> 31);
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
@@ -213,8 +231,14 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) !void {
}
pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
- @setRuntimeSafety(builtin.is_test);
+ return div_u32(n, d);
+}
+fn __aeabi_uidiv(n: u32, d: u32) callconv(.AAPCS) u32 {
+ return div_u32(n, d);
+}
+
+inline fn div_u32(n: u32, d: u32) u32 {
const n_uword_bits: c_uint = 32;
// special cases
if (d == 0) return 0; // ?!
@@ -400,8 +424,6 @@ fn test_one_udivsi3(a: u32, b: u32, expected_q: u32) !void {
}
pub fn __modsi3(n: i32, d: i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
return n -% __divsi3(n, d) *% d;
}
@@ -431,8 +453,6 @@ fn test_one_modsi3(a: i32, b: i32, expected_r: i32) !void {
}
pub fn __umodsi3(n: u32, d: u32) callconv(.C) u32 {
- @setRuntimeSafety(builtin.is_test);
-
return n -% __udivsi3(n, d) *% d;
}
@@ -583,8 +603,6 @@ fn test_one_umodsi3(a: u32, b: u32, expected_r: u32) !void {
}
pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var ua = @bitCast(u32, a);
var ub = @bitCast(u32, b);
var r: u32 = 0;
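
The signed division and modulo routines above rely on a branchless abs/sign idiom: for an i32, x >> 31 is 0 when x is non-negative and -1 when x is negative, so (x ^ (x >> 31)) -% (x >> 31) yields the absolute value without a conditional. An illustrative check, not part of the patch:

test "branchless abs idiom used by div_i32" {
    const std = @import("std");
    const x: i32 = -5;
    const s = x >> 31; // -1 since x is negative
    const abs = (x ^ s) -% s; // (~x) + 1 == -x == 5
    try std.testing.expectEqual(@as(i32, 5), abs);
}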
diff --git a/lib/compiler_rt/int_to_float.zig b/lib/compiler_rt/int_to_float.zig
new file mode 100644
index 0000000000..233dfec815
--- /dev/null
+++ b/lib/compiler_rt/int_to_float.zig
@@ -0,0 +1,58 @@
+const Int = @import("std").meta.Int;
+const math = @import("std").math;
+
+pub fn intToFloat(comptime T: type, x: anytype) T {
+ if (x == 0) return 0;
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const Z = Int(.unsigned, @bitSizeOf(@TypeOf(x)));
+ const uT = Int(.unsigned, @bitSizeOf(T));
+ const inf = math.inf(T);
+ const float_bits = @bitSizeOf(T);
+ const int_bits = @bitSizeOf(@TypeOf(x));
+ const exp_bits = math.floatExponentBits(T);
+ const fractional_bits = math.floatFractionalBits(T);
+ const exp_bias = math.maxInt(Int(.unsigned, exp_bits - 1));
+ const implicit_bit = if (T != f80) @as(uT, 1) << fractional_bits else 0;
+ const max_exp = exp_bias;
+
+ // Sign
+ var abs_val = math.absCast(x);
+ const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
+ var result: uT = sign_bit;
+
+ // Compute significand
+ var exp = int_bits - @clz(Z, abs_val) - 1;
+ if (int_bits <= fractional_bits or exp <= fractional_bits) {
+ const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
+
+ // Shift up result to line up with the significand - no rounding required
+ result = (@intCast(uT, abs_val) << shift_amt);
+ result ^= implicit_bit; // Remove implicit integer bit
+ } else {
+ var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
+ const exact_tie: bool = @ctz(Z, abs_val) == shift_amt - 1;
+
+ // Shift down result and remove implicit integer bit
+ result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);
+
+ // Round result, including round-to-even for exact ties
+ result = ((result + 1) >> 1) & ~@as(uT, @boolToInt(exact_tie));
+ }
+
+ // Compute exponent
+ if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity
+ return @bitCast(T, sign_bit | @bitCast(uT, inf));
+
+ result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T);
+
+ // If the result included a carry, we need to restore the explicit integer bit
+ if (T == f80) result |= 1 << fractional_bits;
+
+ return @bitCast(T, sign_bit | result);
+}
+
+test {
+ _ = @import("int_to_float_test.zig");
+}
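
One step worth spelling out: exp = int_bits - @clz(Z, abs_val) - 1 is simply the bit index of the most significant set bit, i.e. floor(log2(abs_val)), which becomes the unbiased exponent. A small illustration of that step alone (assumes a u32 input; not part of the patch):

test "exponent is the index of the top set bit" {
    const std = @import("std");
    const v: u32 = 0b1010; // 10
    const exp = 32 - @as(u32, @clz(u32, v)) - 1;
    try std.testing.expectEqual(@as(u32, 3), exp); // 10 == 1.25 * 2^3
}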
diff --git a/lib/compiler_rt/floatXiYf_test.zig b/lib/compiler_rt/int_to_float_test.zig
index cffa2a5b42..f6eabbf4ba 100644
--- a/lib/compiler_rt/floatXiYf_test.zig
+++ b/lib/compiler_rt/int_to_float_test.zig
@@ -2,31 +2,32 @@ const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;
const math = std.math;
-const floatXiYf = @import("floatXiYf.zig").floatXiYf;
+
+const __floatunsihf = @import("floatunsihf.zig").__floatunsihf;
// Conversion to f32
-const __floatsisf = @import("floatXiYf.zig").__floatsisf;
-const __floatunsisf = @import("floatXiYf.zig").__floatunsisf;
-const __floatdisf = @import("floatXiYf.zig").__floatdisf;
-const __floatundisf = @import("floatXiYf.zig").__floatundisf;
-const __floattisf = @import("floatXiYf.zig").__floattisf;
-const __floatuntisf = @import("floatXiYf.zig").__floatuntisf;
+const __floatsisf = @import("floatsisf.zig").__floatsisf;
+const __floatunsisf = @import("floatunsisf.zig").__floatunsisf;
+const __floatdisf = @import("floatdisf.zig").__floatdisf;
+const __floatundisf = @import("floatundisf.zig").__floatundisf;
+const __floattisf = @import("floattisf.zig").__floattisf;
+const __floatuntisf = @import("floatuntisf.zig").__floatuntisf;
// Conversion to f64
-const __floatsidf = @import("floatXiYf.zig").__floatsidf;
-const __floatunsidf = @import("floatXiYf.zig").__floatunsidf;
-const __floatdidf = @import("floatXiYf.zig").__floatdidf;
-const __floatundidf = @import("floatXiYf.zig").__floatundidf;
-const __floattidf = @import("floatXiYf.zig").__floattidf;
-const __floatuntidf = @import("floatXiYf.zig").__floatuntidf;
+const __floatsidf = @import("floatsidf.zig").__floatsidf;
+const __floatunsidf = @import("floatunsidf.zig").__floatunsidf;
+const __floatdidf = @import("floatdidf.zig").__floatdidf;
+const __floatundidf = @import("floatundidf.zig").__floatundidf;
+const __floattidf = @import("floattidf.zig").__floattidf;
+const __floatuntidf = @import("floatuntidf.zig").__floatuntidf;
// Conversion to f128
-const __floatsitf = @import("floatXiYf.zig").__floatsitf;
-const __floatunsitf = @import("floatXiYf.zig").__floatunsitf;
-const __floatditf = @import("floatXiYf.zig").__floatditf;
-const __floatunditf = @import("floatXiYf.zig").__floatunditf;
-const __floattitf = @import("floatXiYf.zig").__floattitf;
-const __floatuntitf = @import("floatXiYf.zig").__floatuntitf;
+const __floatsitf = @import("floatsitf.zig").__floatsitf;
+const __floatunsitf = @import("floatunsitf.zig").__floatunsitf;
+const __floatditf = @import("floatditf.zig").__floatditf;
+const __floatunditf = @import("floatunditf.zig").__floatunditf;
+const __floattitf = @import("floattitf.zig").__floattitf;
+const __floatuntitf = @import("floatuntitf.zig").__floatuntitf;
fn test__floatsisf(a: i32, expected: u32) !void {
const r = __floatsisf(a);
@@ -791,45 +792,47 @@ fn make_tf(high: u64, low: u64) f128 {
}
test "conversion to f16" {
- try testing.expect(floatXiYf(f16, @as(u32, 0)) == 0.0);
- try testing.expect(floatXiYf(f16, @as(u32, 1)) == 1.0);
- try testing.expect(floatXiYf(f16, @as(u32, 65504)) == 65504);
- try testing.expect(floatXiYf(f16, @as(u32, 65504 + (1 << 4))) == math.inf(f16));
+ try testing.expect(__floatunsihf(@as(u32, 0)) == 0.0);
+ try testing.expect(__floatunsihf(@as(u32, 1)) == 1.0);
+ try testing.expect(__floatunsihf(@as(u32, 65504)) == 65504);
+ try testing.expect(__floatunsihf(@as(u32, 65504 + (1 << 4))) == math.inf(f16));
}
test "conversion to f32" {
- try testing.expect(floatXiYf(f32, @as(u32, 0)) == 0.0);
- try testing.expect(floatXiYf(f32, @as(u32, math.maxInt(u32))) != 1.0);
- try testing.expect(floatXiYf(f32, @as(i32, math.minInt(i32))) != 1.0);
- try testing.expect(floatXiYf(f32, @as(u32, math.maxInt(u24))) == math.maxInt(u24));
- try testing.expect(floatXiYf(f32, @as(u32, math.maxInt(u24)) + 1) == math.maxInt(u24) + 1); // 0x100_0000 - Exact
- try testing.expect(floatXiYf(f32, @as(u32, math.maxInt(u24)) + 2) == math.maxInt(u24) + 1); // 0x100_0001 - Tie: Rounds down to even
- try testing.expect(floatXiYf(f32, @as(u32, math.maxInt(u24)) + 3) == math.maxInt(u24) + 3); // 0x100_0002 - Exact
- try testing.expect(floatXiYf(f32, @as(u32, math.maxInt(u24)) + 4) == math.maxInt(u24) + 5); // 0x100_0003 - Tie: Rounds up to even
- try testing.expect(floatXiYf(f32, @as(u32, math.maxInt(u24)) + 5) == math.maxInt(u24) + 5); // 0x100_0004 - Exact
+ try testing.expect(__floatunsisf(@as(u32, 0)) == 0.0);
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u32))) != 1.0);
+ try testing.expect(__floatsisf(@as(i32, math.minInt(i32))) != 1.0);
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24))) == math.maxInt(u24));
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 1) == math.maxInt(u24) + 1); // 0x100_0000 - Exact
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 2) == math.maxInt(u24) + 1); // 0x100_0001 - Tie: Rounds down to even
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 3) == math.maxInt(u24) + 3); // 0x100_0002 - Exact
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 4) == math.maxInt(u24) + 5); // 0x100_0003 - Tie: Rounds up to even
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 5) == math.maxInt(u24) + 5); // 0x100_0004 - Exact
}
test "conversion to f80" {
if (builtin.zig_backend == .stage1 and builtin.cpu.arch != .x86_64)
return error.SkipZigTest; // https://github.com/ziglang/zig/issues/11408
- try testing.expect(floatXiYf(f80, @as(i80, -12)) == -12);
- try testing.expect(@floatToInt(u80, floatXiYf(f80, @as(u64, math.maxInt(u64)) + 0)) == math.maxInt(u64) + 0);
- try testing.expect(@floatToInt(u80, floatXiYf(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1);
-
- try testing.expect(floatXiYf(f80, @as(u32, 0)) == 0.0);
- try testing.expect(floatXiYf(f80, @as(u32, 1)) == 1.0);
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u32, math.maxInt(u24)) + 0)) == math.maxInt(u24));
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u64)) + 0)) == math.maxInt(u64));
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); // Exact
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u64)) + 2)) == math.maxInt(u64) + 1); // Rounds down
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u64)) + 3)) == math.maxInt(u64) + 3); // Tie - Exact
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u64)) + 4)) == math.maxInt(u64) + 5); // Rounds up
-
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u65)) + 0)) == math.maxInt(u65) + 1); // Rounds up
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u65)) + 1)) == math.maxInt(u65) + 1); // Exact
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u65)) + 2)) == math.maxInt(u65) + 1); // Rounds down
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u65)) + 3)) == math.maxInt(u65) + 1); // Tie - Rounds down
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u65)) + 4)) == math.maxInt(u65) + 5); // Rounds up
- try testing.expect(@floatToInt(u128, floatXiYf(f80, @as(u80, math.maxInt(u65)) + 5)) == math.maxInt(u65) + 5); // Exact
+ const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+ try testing.expect(intToFloat(f80, @as(i80, -12)) == -12);
+ try testing.expect(@floatToInt(u80, intToFloat(f80, @as(u64, math.maxInt(u64)) + 0)) == math.maxInt(u64) + 0);
+ try testing.expect(@floatToInt(u80, intToFloat(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1);
+
+ try testing.expect(intToFloat(f80, @as(u32, 0)) == 0.0);
+ try testing.expect(intToFloat(f80, @as(u32, 1)) == 1.0);
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u32, math.maxInt(u24)) + 0)) == math.maxInt(u24));
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 0)) == math.maxInt(u64));
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); // Exact
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 2)) == math.maxInt(u64) + 1); // Rounds down
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 3)) == math.maxInt(u64) + 3); // Tie - Exact
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 4)) == math.maxInt(u64) + 5); // Rounds up
+
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 0)) == math.maxInt(u65) + 1); // Rounds up
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 1)) == math.maxInt(u65) + 1); // Exact
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 2)) == math.maxInt(u65) + 1); // Rounds down
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 3)) == math.maxInt(u65) + 1); // Tie - Rounds down
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 4)) == math.maxInt(u65) + 5); // Rounds up
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 5)) == math.maxInt(u65) + 5); // Exact
}
diff --git a/lib/compiler_rt/log.zig b/lib/compiler_rt/log.zig
index 6e705dae60..90a38ba381 100644
--- a/lib/compiler_rt/log.zig
+++ b/lib/compiler_rt/log.zig
@@ -1,12 +1,27 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/lnf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/ln.c
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/lnf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/ln.c
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const testing = std.testing;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__logh, .{ .name = "__logh", .linkage = common.linkage });
+ @export(logf, .{ .name = "logf", .linkage = common.linkage });
+ @export(log, .{ .name = "log", .linkage = common.linkage });
+ @export(__logx, .{ .name = "__logx", .linkage = common.linkage });
+ const logq_sym_name = if (common.want_ppc_abi) "logf128" else "logq";
+ @export(logq, .{ .name = logq_sym_name, .linkage = common.linkage });
+ @export(logl, .{ .name = "logl", .linkage = common.linkage });
+}
pub fn __logh(a: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/log10.zig b/lib/compiler_rt/log10.zig
index 47499d2739..406eb8d0c1 100644
--- a/lib/compiler_rt/log10.zig
+++ b/lib/compiler_rt/log10.zig
@@ -1,13 +1,28 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/log10f.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/log10.c
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log10f.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log10.c
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const testing = std.testing;
const maxInt = std.math.maxInt;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__log10h, .{ .name = "__log10h", .linkage = common.linkage });
+ @export(log10f, .{ .name = "log10f", .linkage = common.linkage });
+ @export(log10, .{ .name = "log10", .linkage = common.linkage });
+ @export(__log10x, .{ .name = "__log10x", .linkage = common.linkage });
+ const log10q_sym_name = if (common.want_ppc_abi) "log10f128" else "log10q";
+ @export(log10q, .{ .name = log10q_sym_name, .linkage = common.linkage });
+ @export(log10l, .{ .name = "log10l", .linkage = common.linkage });
+}
pub fn __log10h(a: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/log2.zig b/lib/compiler_rt/log2.zig
index aa294b33fd..6f6c07212a 100644
--- a/lib/compiler_rt/log2.zig
+++ b/lib/compiler_rt/log2.zig
@@ -1,13 +1,28 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/log2f.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/log2.c
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log2f.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log2.c
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const expect = std.testing.expect;
const maxInt = std.math.maxInt;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__log2h, .{ .name = "__log2h", .linkage = common.linkage });
+ @export(log2f, .{ .name = "log2f", .linkage = common.linkage });
+ @export(log2, .{ .name = "log2", .linkage = common.linkage });
+ @export(__log2x, .{ .name = "__log2x", .linkage = common.linkage });
+ const log2q_sym_name = if (common.want_ppc_abi) "log2f128" else "log2q";
+ @export(log2q, .{ .name = log2q_sym_name, .linkage = common.linkage });
+ @export(log2l, .{ .name = "log2l", .linkage = common.linkage });
+}
pub fn __log2h(a: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/modti3.zig b/lib/compiler_rt/modti3.zig
index 42cbda9627..5fa34938ff 100644
--- a/lib/compiler_rt/modti3.zig
+++ b/lib/compiler_rt/modti3.zig
@@ -1,14 +1,47 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/modti3.c
+//! Ported from:
+//!
+//! https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/modti3.c
-const udivmod = @import("udivmod.zig").udivmod;
+const std = @import("std");
const builtin = @import("builtin");
-const compiler_rt = @import("../compiler_rt.zig");
+const udivmod = @import("udivmod.zig").udivmod;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
+ }
+ } else {
+ @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
+ }
+}
pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
- @setRuntimeSafety(builtin.is_test);
+ return mod(a, b);
+}
+const v128 = @import("std").meta.Vector(2, u64);
+
+fn __modti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ return @bitCast(v128, mod(@bitCast(i128, a), @bitCast(i128, b)));
+}
+
+inline fn mod(a: i128, b: i128) i128 {
const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0
const s_b = b >> (128 - 1); // s = b < 0 ? -1 : 0
@@ -20,14 +53,6 @@ pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
return (@bitCast(i128, r) ^ s_a) -% s_a; // negate if s == -1
}
-const v128 = @import("std").meta.Vector(2, u64);
-pub fn __modti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
- return @bitCast(v128, @call(.{ .modifier = .always_inline }, __modti3, .{
- @bitCast(i128, a),
- @bitCast(i128, b),
- }));
-}
-
test {
_ = @import("modti3_test.zig");
}
diff --git a/lib/compiler_rt/muldf3.zig b/lib/compiler_rt/muldf3.zig
new file mode 100644
index 0000000000..ef7ab9fbf7
--- /dev/null
+++ b/lib/compiler_rt/muldf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = common.linkage });
+ } else {
+ @export(__muldf3, .{ .name = "__muldf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
+ return mulf3(f64, a, b);
+}
+
+fn __aeabi_dmul(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return mulf3(f64, a, b);
+}
diff --git a/lib/compiler_rt/muldi3.zig b/lib/compiler_rt/muldi3.zig
index f0d857e1e9..a51c6c7b76 100644
--- a/lib/compiler_rt/muldi3.zig
+++ b/lib/compiler_rt/muldi3.zig
@@ -1,10 +1,36 @@
+//! Ported from
+//! https://github.com/llvm/llvm-project/blob/llvmorg-9.0.0/compiler-rt/lib/builtins/muldi3.c
+
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const native_endian = builtin.cpu.arch.endian();
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_lmul, .{ .name = "__aeabi_lmul", .linkage = common.linkage });
+ } else {
+ @export(__muldi3, .{ .name = "__muldi3", .linkage = common.linkage });
+ }
+}
+
+pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
+ return mul(a, b);
+}
-// Ported from
-// https://github.com/llvm/llvm-project/blob/llvmorg-9.0.0/compiler-rt/lib/builtins/muldi3.c
+fn __aeabi_lmul(a: i64, b: i64) callconv(.AAPCS) i64 {
+ return mul(a, b);
+}
+
+inline fn mul(a: i64, b: i64) i64 {
+ const x = dwords{ .all = a };
+ const y = dwords{ .all = b };
+ var r = dwords{ .all = muldsi3(x.s.low, y.s.low) };
+ r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
+ return r.all;
+}
const dwords = extern union {
all: i64,
@@ -20,9 +46,7 @@ const dwords = extern union {
},
};
-fn __muldsi3(a: u32, b: u32) i64 {
- @setRuntimeSafety(is_test);
-
+fn muldsi3(a: u32, b: u32) i64 {
const bits_in_word_2 = @sizeOf(i32) * 8 / 2;
const lower_mask = (~@as(u32, 0)) >> bits_in_word_2;
@@ -42,16 +66,6 @@ fn __muldsi3(a: u32, b: u32) i64 {
return r.all;
}
-pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
- @setRuntimeSafety(is_test);
-
- const x = dwords{ .all = a };
- const y = dwords{ .all = b };
- var r = dwords{ .all = __muldsi3(x.s.low, y.s.low) };
- r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
- return r.all;
-}
-
test {
_ = @import("muldi3_test.zig");
}
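
mul() above is the schoolbook decomposition into 32-bit halves: with a = ah*2^32 + al and b = bh*2^32 + bl, the low 64 bits of a*b are al*bl plus the two cross terms shifted up by 32; the ah*bh term only affects bits 64 and above, so it is dropped. A self-contained illustration of that identity, not part of the patch:

test "64-bit multiply from 32-bit halves" {
    const std = @import("std");
    const a: u64 = 0x1234_5678_9abc_def0;
    const b: u64 = 0x0fed_cba9_8765_4321;
    const al = @truncate(u32, a);
    const ah = @intCast(u32, a >> 32);
    const bl = @truncate(u32, b);
    const bh = @intCast(u32, b >> 32);
    var lo = @as(u64, al) * @as(u64, bl); // full 32x32 -> 64 product
    lo +%= (@as(u64, ah) *% @as(u64, bl) +% @as(u64, al) *% @as(u64, bh)) << 32; // cross terms
    try std.testing.expectEqual(a *% b, lo);
}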
diff --git a/lib/compiler_rt/mulXf3.zig b/lib/compiler_rt/mulf3.zig
index 147efd0ab5..f6949ee3ce 100644
--- a/lib/compiler_rt/mulXf3.zig
+++ b/lib/compiler_rt/mulf3.zig
@@ -1,36 +1,11 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
-
const std = @import("std");
const math = std.math;
const builtin = @import("builtin");
-const compiler_rt = @import("../compiler_rt.zig");
+const common = @import("./common.zig");
-pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
- return mulXf3(f128, a, b);
-}
-pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
- return mulXf3(f80, a, b);
-}
-pub fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
- return mulXf3(f64, a, b);
-}
-pub fn __mulsf3(a: f32, b: f32) callconv(.C) f32 {
- return mulXf3(f32, a, b);
-}
-
-pub fn __aeabi_fmul(a: f32, b: f32) callconv(.C) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __mulsf3, .{ a, b });
-}
-
-pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __muldf3, .{ a, b });
-}
-
-fn mulXf3(comptime T: type, a: T, b: T) T {
+/// Ported from:
+/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
+pub inline fn mulf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(T).Float.bits;
const significandBits = math.floatMantissaBits(T);
@@ -121,7 +96,7 @@ fn mulXf3(comptime T: type, a: T, b: T) T {
var productHi: ZSignificand = undefined;
var productLo: ZSignificand = undefined;
const left_align_shift = ZSignificandBits - fractionalBits - 1;
- wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo);
+ common.wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo);
var productExponent: i32 = @intCast(i32, aExponent + bExponent) - exponentBias + scale;
@@ -183,141 +158,9 @@ fn mulXf3(comptime T: type, a: T, b: T) T {
return @bitCast(T, result);
}
-fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
- @setRuntimeSafety(builtin.is_test);
- switch (Z) {
- u16 => {
- // 16x16 --> 32 bit multiply
- const product = @as(u32, a) * @as(u32, b);
- hi.* = @intCast(u16, product >> 16);
- lo.* = @truncate(u16, product);
- },
- u32 => {
- // 32x32 --> 64 bit multiply
- const product = @as(u64, a) * @as(u64, b);
- hi.* = @intCast(u32, product >> 32);
- lo.* = @truncate(u32, product);
- },
- u64 => {
- const S = struct {
- fn loWord(x: u64) u64 {
- return @truncate(u32, x);
- }
- fn hiWord(x: u64) u64 {
- return @intCast(u32, x >> 32);
- }
- };
- // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
- // Each of the component 32x32 -> 64 products
- const plolo: u64 = S.loWord(a) * S.loWord(b);
- const plohi: u64 = S.loWord(a) * S.hiWord(b);
- const philo: u64 = S.hiWord(a) * S.loWord(b);
- const phihi: u64 = S.hiWord(a) * S.hiWord(b);
- // Sum terms that contribute to lo in a way that allows us to get the carry
- const r0: u64 = S.loWord(plolo);
- const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
- lo.* = r0 +% (r1 << 32);
- // Sum terms contributing to hi with the carry from lo
- hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
- },
- u128 => {
- const Word_LoMask = @as(u64, 0x00000000ffffffff);
- const Word_HiMask = @as(u64, 0xffffffff00000000);
- const Word_FullMask = @as(u64, 0xffffffffffffffff);
- const S = struct {
- fn Word_1(x: u128) u64 {
- return @truncate(u32, x >> 96);
- }
- fn Word_2(x: u128) u64 {
- return @truncate(u32, x >> 64);
- }
- fn Word_3(x: u128) u64 {
- return @truncate(u32, x >> 32);
- }
- fn Word_4(x: u128) u64 {
- return @truncate(u32, x);
- }
- };
- // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
-
- const product11: u64 = S.Word_1(a) * S.Word_1(b);
- const product12: u64 = S.Word_1(a) * S.Word_2(b);
- const product13: u64 = S.Word_1(a) * S.Word_3(b);
- const product14: u64 = S.Word_1(a) * S.Word_4(b);
- const product21: u64 = S.Word_2(a) * S.Word_1(b);
- const product22: u64 = S.Word_2(a) * S.Word_2(b);
- const product23: u64 = S.Word_2(a) * S.Word_3(b);
- const product24: u64 = S.Word_2(a) * S.Word_4(b);
- const product31: u64 = S.Word_3(a) * S.Word_1(b);
- const product32: u64 = S.Word_3(a) * S.Word_2(b);
- const product33: u64 = S.Word_3(a) * S.Word_3(b);
- const product34: u64 = S.Word_3(a) * S.Word_4(b);
- const product41: u64 = S.Word_4(a) * S.Word_1(b);
- const product42: u64 = S.Word_4(a) * S.Word_2(b);
- const product43: u64 = S.Word_4(a) * S.Word_3(b);
- const product44: u64 = S.Word_4(a) * S.Word_4(b);
-
- const sum0: u128 = @as(u128, product44);
- const sum1: u128 = @as(u128, product34) +%
- @as(u128, product43);
- const sum2: u128 = @as(u128, product24) +%
- @as(u128, product33) +%
- @as(u128, product42);
- const sum3: u128 = @as(u128, product14) +%
- @as(u128, product23) +%
- @as(u128, product32) +%
- @as(u128, product41);
- const sum4: u128 = @as(u128, product13) +%
- @as(u128, product22) +%
- @as(u128, product31);
- const sum5: u128 = @as(u128, product12) +%
- @as(u128, product21);
- const sum6: u128 = @as(u128, product11);
-
- const r0: u128 = (sum0 & Word_FullMask) +%
- ((sum1 & Word_LoMask) << 32);
- const r1: u128 = (sum0 >> 64) +%
- ((sum1 >> 32) & Word_FullMask) +%
- (sum2 & Word_FullMask) +%
- ((sum3 << 32) & Word_HiMask);
-
- lo.* = r0 +% (r1 << 64);
- hi.* = (r1 >> 64) +%
- (sum1 >> 96) +%
- (sum2 >> 64) +%
- (sum3 >> 32) +%
- sum4 +%
- (sum5 << 32) +%
- (sum6 << 64);
- },
- else => @compileError("unsupported"),
- }
-}
-
-/// Returns a power-of-two integer type that is large enough to contain
-/// the significand of T, including an explicit integer bit
-fn PowerOfTwoSignificandZ(comptime T: type) type {
- const bits = math.ceilPowerOfTwoAssert(u16, math.floatFractionalBits(T) + 1);
- return std.meta.Int(.unsigned, bits);
-}
-
-fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
- @setRuntimeSafety(builtin.is_test);
- const Z = PowerOfTwoSignificandZ(T);
- const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
-
- const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
- significand.* <<= @intCast(math.Log2Int(Z), shift);
- return @as(i32, 1) - shift;
-}
-
-// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero)
-//
-// This is analogous to an shr version of `@shlWithOverflow`
+/// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero)
+///
+/// This is analogous to an shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(Z).Int.bits;
@@ -339,6 +182,22 @@ fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
return inexact;
}
+fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
+ const Z = PowerOfTwoSignificandZ(T);
+ const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
+
+ const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
+ significand.* <<= @intCast(math.Log2Int(Z), shift);
+ return @as(i32, 1) - shift;
+}
+
+/// Returns a power-of-two integer type that is large enough to contain
+/// the significand of T, including an explicit integer bit
+fn PowerOfTwoSignificandZ(comptime T: type) type {
+ const bits = math.ceilPowerOfTwoAssert(u16, math.floatFractionalBits(T) + 1);
+ return std.meta.Int(.unsigned, bits);
+}
+
test {
- _ = @import("mulXf3_test.zig");
+ _ = @import("mulf3_test.zig");
}
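
normalize(), now private to this file, handles subnormal inputs: it shifts the significand left until the explicit integer bit is set and reports how far the exponent must be adjusted, and PowerOfTwoSignificandZ picks the smallest power-of-two unsigned type wide enough for that significand (u32 for f32, u64 for f64 and f80, u128 for f128). A minimal sketch of the same computation specialized to f32, for illustration only:

test "normalizing a subnormal f32 significand" {
    const std = @import("std");
    var sig: u32 = 1; // smallest subnormal significand
    const integer_bit = @as(u32, 1) << 23; // explicit integer bit position for f32
    const shift = @clz(u32, sig) - @clz(u32, integer_bit); // 31 - 8 == 23
    sig <<= @intCast(u5, shift);
    try std.testing.expect(sig == integer_bit);
    try std.testing.expectEqual(@as(i32, -22), @as(i32, 1) - @as(i32, shift)); // exponent adjustment
}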
diff --git a/lib/compiler_rt/mulXf3_test.zig b/lib/compiler_rt/mulf3_test.zig
index 6b4f8ca953..203745e632 100644
--- a/lib/compiler_rt/mulXf3_test.zig
+++ b/lib/compiler_rt/mulf3_test.zig
@@ -7,10 +7,10 @@ const math = std.math;
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64);
-const __multf3 = @import("mulXf3.zig").__multf3;
-const __mulxf3 = @import("mulXf3.zig").__mulxf3;
-const __muldf3 = @import("mulXf3.zig").__muldf3;
-const __mulsf3 = @import("mulXf3.zig").__mulsf3;
+const __multf3 = @import("multf3.zig").__multf3;
+const __mulxf3 = @import("mulxf3.zig").__mulxf3;
+const __muldf3 = @import("muldf3.zig").__muldf3;
+const __mulsf3 = @import("mulsf3.zig").__mulsf3;
// return true if equal
// use two 64-bit integers instead of one 128-bit integer
diff --git a/lib/compiler_rt/mulo.zig b/lib/compiler_rt/mulo.zig
index 78590e5ce1..cd2d127c34 100644
--- a/lib/compiler_rt/mulo.zig
+++ b/lib/compiler_rt/mulo.zig
@@ -1,6 +1,15 @@
-const builtin = @import("builtin");
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__mulosi4, .{ .name = "__mulosi4", .linkage = common.linkage });
+ @export(__mulodi4, .{ .name = "__mulodi4", .linkage = common.linkage });
+ @export(__muloti4, .{ .name = "__muloti4", .linkage = common.linkage });
+}
// mulo - multiplication overflow
// * return a*%b.
@@ -9,7 +18,6 @@ const math = std.math;
// - muloXi4_genericFast for 2*bitsize <= usize
inline fn muloXi4_genericSmall(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
- @setRuntimeSafety(builtin.is_test);
overflow.* = 0;
const min = math.minInt(ST);
var res: ST = a *% b;
@@ -24,7 +32,6 @@ inline fn muloXi4_genericSmall(comptime ST: type, a: ST, b: ST, overflow: *c_int
}
inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
- @setRuntimeSafety(builtin.is_test);
overflow.* = 0;
const EST = switch (ST) {
i32 => i64,
diff --git a/lib/compiler_rt/mulsf3.zig b/lib/compiler_rt/mulsf3.zig
new file mode 100644
index 0000000000..3294f5b1c7
--- /dev/null
+++ b/lib/compiler_rt/mulsf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = common.linkage });
+ } else {
+ @export(__mulsf3, .{ .name = "__mulsf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __mulsf3(a: f32, b: f32) callconv(.C) f32 {
+ return mulf3(f32, a, b);
+}
+
+fn __aeabi_fmul(a: f32, b: f32) callconv(.AAPCS) f32 {
+ return mulf3(f32, a, b);
+}
diff --git a/lib/compiler_rt/multf3.zig b/lib/compiler_rt/multf3.zig
new file mode 100644
index 0000000000..d4449ab72e
--- /dev/null
+++ b/lib/compiler_rt/multf3.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__mulkf3, .{ .name = "__mulkf3", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = common.linkage });
+ } else {
+ @export(__multf3, .{ .name = "__multf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
+ return mulf3(f128, a, b);
+}
+
+fn __mulkf3(a: f128, b: f128) callconv(.C) f128 {
+ return mulf3(f128, a, b);
+}
+
+fn _Qp_mul(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+ c.* = mulf3(f128, a.*, b.*);
+}
diff --git a/lib/compiler_rt/multi3.zig b/lib/compiler_rt/multi3.zig
index a088dbcf9e..ba41cb7917 100644
--- a/lib/compiler_rt/multi3.zig
+++ b/lib/compiler_rt/multi3.zig
@@ -1,31 +1,52 @@
-const compiler_rt = @import("../compiler_rt.zig");
+//! Ported from git@github.com:llvm-project/llvm-project-20170507.git
+//! ae684fad6d34858c014c94da69c15e7774a633c3
+//! 2018-08-13
+
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
+const arch = builtin.cpu.arch;
const native_endian = builtin.cpu.arch.endian();
+const common = @import("common.zig");
+
+pub const panic = common.panic;
-// Ported from git@github.com:llvm-project/llvm-project-20170507.git
-// ae684fad6d34858c014c94da69c15e7774a633c3
-// 2018-08-13
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ } else {
+ @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage });
+ }
+}
pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
- @setRuntimeSafety(is_test);
+ return mul(a, b);
+}
+
+const v128 = @Vector(2, u64);
+
+fn __multi3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ return @bitCast(v128, mul(@bitCast(i128, a), @bitCast(i128, b)));
+}
+
+inline fn mul(a: i128, b: i128) i128 {
const x = twords{ .all = a };
const y = twords{ .all = b };
- var r = twords{ .all = __mulddi3(x.s.low, y.s.low) };
+ var r = twords{ .all = mulddi3(x.s.low, y.s.low) };
r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
return r.all;
}
-const v128 = @Vector(2, u64);
-pub fn __multi3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
- return @bitCast(v128, @call(.{ .modifier = .always_inline }, __multi3, .{
- @bitCast(i128, a),
- @bitCast(i128, b),
- }));
-}
-
-fn __mulddi3(a: u64, b: u64) i128 {
+fn mulddi3(a: u64, b: u64) i128 {
const bits_in_dword_2 = (@sizeOf(i64) * 8) / 2;
const lower_mask = ~@as(u64, 0) >> bits_in_dword_2;
var r: twords = undefined;
diff --git a/lib/compiler_rt/mulxf3.zig b/lib/compiler_rt/mulxf3.zig
new file mode 100644
index 0000000000..353d27c290
--- /dev/null
+++ b/lib/compiler_rt/mulxf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__mulxf3, .{ .name = "__mulxf3", .linkage = common.linkage });
+}
+
+pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
+ return mulf3(f80, a, b);
+}
diff --git a/lib/compiler_rt/negXf2.zig b/lib/compiler_rt/negXf2.zig
index 06528b7570..bcff3660f4 100644
--- a/lib/compiler_rt/negXf2.zig
+++ b/lib/compiler_rt/negXf2.zig
@@ -1,24 +1,36 @@
const std = @import("std");
+const builtin = @import("builtin");
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = common.linkage });
+ @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = common.linkage });
+ } else {
+ @export(__negsf2, .{ .name = "__negsf2", .linkage = common.linkage });
+ @export(__negdf2, .{ .name = "__negdf2", .linkage = common.linkage });
+ }
+}
pub fn __negsf2(a: f32) callconv(.C) f32 {
return negXf2(f32, a);
}
-pub fn __negdf2(a: f64) callconv(.C) f64 {
- return negXf2(f64, a);
+fn __aeabi_fneg(a: f32) callconv(.AAPCS) f32 {
+ return negXf2(f32, a);
}
-pub fn __aeabi_fneg(arg: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __negsf2, .{arg});
+pub fn __negdf2(a: f64) callconv(.C) f64 {
+ return negXf2(f64, a);
}
-pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __negdf2, .{arg});
+fn __aeabi_dneg(a: f64) callconv(.AAPCS) f64 {
+ return negXf2(f64, a);
}
-fn negXf2(comptime T: type, a: T) T {
+inline fn negXf2(comptime T: type, a: T) T {
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
diff --git a/lib/compiler_rt/negXi2.zig b/lib/compiler_rt/negXi2.zig
index 15102b5df7..086f80c6b3 100644
--- a/lib/compiler_rt/negXi2.zig
+++ b/lib/compiler_rt/negXi2.zig
@@ -1,19 +1,21 @@
+//! neg - negate (the number)
+//! - negXi2 for unoptimized little and big endian
+//! sfffffff = 2^31-1
+//! two's complement inverting bits and add 1 would result in -INT_MIN == 0
+//! => -INT_MIN = -2^31 forbidden
+//! * size optimized builds
+//! * machines that don't support carry operations
+
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
-// neg - negate (the number)
-// - negXi2 for unoptimized little and big endian
-
-// sfffffff = 2^31-1
-// two's complement inverting bits and add 1 would result in -INT_MIN == 0
-// => -INT_MIN = -2^31 forbidden
-
-// * size optimized builds
-// * machines that dont support carry operations
+pub const panic = common.panic;
-inline fn negXi2(comptime T: type, a: T) T {
- @setRuntimeSafety(builtin.is_test);
- return -a;
+comptime {
+ @export(__negsi2, .{ .name = "__negsi2", .linkage = common.linkage });
+ @export(__negdi2, .{ .name = "__negdi2", .linkage = common.linkage });
+ @export(__negti2, .{ .name = "__negti2", .linkage = common.linkage });
}
pub fn __negsi2(a: i32) callconv(.C) i32 {
@@ -28,6 +30,10 @@ pub fn __negti2(a: i128) callconv(.C) i128 {
return negXi2(i128, a);
}
+inline fn negXi2(comptime T: type, a: T) T {
+ return -a;
+}
+
test {
_ = @import("negsi2_test.zig");
_ = @import("negdi2_test.zig");
diff --git a/lib/compiler_rt/negv.zig b/lib/compiler_rt/negv.zig
index 09abb040d5..361cd80ee7 100644
--- a/lib/compiler_rt/negv.zig
+++ b/lib/compiler_rt/negv.zig
@@ -1,20 +1,16 @@
-// negv - negate oVerflow
-// * @panic, if result can not be represented
-// - negvXi4_generic for unoptimized version
+//! negv - negate oVerflow
+//! * @panic if the result cannot be represented
+//! - negvXi4_generic for unoptimized version
+const std = @import("std");
+const builtin = @import("builtin");
+const common = @import("common.zig");
-// assume -0 == 0 is gracefully handled by the hardware
-inline fn negvXi(comptime ST: type, a: ST) ST {
- const UT = switch (ST) {
- i32 => u32,
- i64 => u64,
- i128 => u128,
- else => unreachable,
- };
- const N: UT = @bitSizeOf(ST);
- const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
- if (a == min)
- @panic("compiler_rt negv: overflow");
- return -a;
+pub const panic = common.panic;
+
+comptime {
+ @export(__negvsi2, .{ .name = "__negvsi2", .linkage = common.linkage });
+ @export(__negvdi2, .{ .name = "__negvdi2", .linkage = common.linkage });
+ @export(__negvti2, .{ .name = "__negvti2", .linkage = common.linkage });
}
pub fn __negvsi2(a: i32) callconv(.C) i32 {
@@ -29,6 +25,20 @@ pub fn __negvti2(a: i128) callconv(.C) i128 {
return negvXi(i128, a);
}
+inline fn negvXi(comptime ST: type, a: ST) ST {
+ const UT = switch (ST) {
+ i32 => u32,
+ i64 => u64,
+ i128 => u128,
+ else => unreachable,
+ };
+ const N: UT = @bitSizeOf(ST);
+ const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
+ if (a == min)
+ @panic("compiler_rt negv: overflow");
+ return -a;
+}
+
test {
_ = @import("negvsi2_test.zig");
_ = @import("negvdi2_test.zig");
diff --git a/lib/compiler_rt/os_version_check.zig b/lib/compiler_rt/os_version_check.zig
index 55617dec75..2c6cdb54dc 100644
--- a/lib/compiler_rt/os_version_check.zig
+++ b/lib/compiler_rt/os_version_check.zig
@@ -1,5 +1,14 @@
-const testing = @import("std").testing;
+const std = @import("std");
+const testing = std.testing;
const builtin = @import("builtin");
+const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const panic = @import("common.zig").panic;
+
+comptime {
+ if (builtin.os.tag.isDarwin()) {
+ @export(__isPlatformVersionAtLeast, .{ .name = "__isPlatformVersionAtLeast", .linkage = linkage });
+ }
+}
// Ported from llvm-project 13.0.0 d7b669b3a30345cfcdb2fde2af6f48aa4b94845d
//
@@ -16,30 +25,32 @@ const builtin = @import("builtin");
// the newer codepath, which merely calls out to the Darwin _availability_version_check API which is
// available on macOS 10.15+, iOS 13+, tvOS 13+ and watchOS 6+.
-inline fn constructVersion(major: u32, minor: u32, subminor: u32) u32 {
- return ((major & 0xffff) << 16) | ((minor & 0xff) << 8) | (subminor & 0xff);
-}
+const __isPlatformVersionAtLeast = if (builtin.os.tag.isDarwin()) struct {
+ inline fn constructVersion(major: u32, minor: u32, subminor: u32) u32 {
+ return ((major & 0xffff) << 16) | ((minor & 0xff) << 8) | (subminor & 0xff);
+ }
-// Darwin-only
-pub fn __isPlatformVersionAtLeast(platform: u32, major: u32, minor: u32, subminor: u32) callconv(.C) i32 {
- const build_version = dyld_build_version_t{
- .platform = platform,
- .version = constructVersion(major, minor, subminor),
- };
- return @boolToInt(_availability_version_check(1, &[_]dyld_build_version_t{build_version}));
-}
+ // Darwin-only
+ fn __isPlatformVersionAtLeast(platform: u32, major: u32, minor: u32, subminor: u32) callconv(.C) i32 {
+ const build_version = dyld_build_version_t{
+ .platform = platform,
+ .version = constructVersion(major, minor, subminor),
+ };
+ return @boolToInt(_availability_version_check(1, &[_]dyld_build_version_t{build_version}));
+ }
-// _availability_version_check darwin API support.
-const dyld_platform_t = u32;
-const dyld_build_version_t = extern struct {
- platform: dyld_platform_t,
- version: u32,
-};
-// Darwin-only
-extern "c" fn _availability_version_check(count: u32, versions: [*c]const dyld_build_version_t) bool;
+ // _availability_version_check darwin API support.
+ const dyld_platform_t = u32;
+ const dyld_build_version_t = extern struct {
+ platform: dyld_platform_t,
+ version: u32,
+ };
+ // Darwin-only
+ extern "c" fn _availability_version_check(count: u32, versions: [*c]const dyld_build_version_t) bool;
+}.__isPlatformVersionAtLeast else struct {};
test "isPlatformVersionAtLeast" {
- if (!builtin.os.tag.isDarwin()) return error.SkipZigTest;
+ if (!comptime builtin.os.tag.isDarwin()) return error.SkipZigTest;
// Note: this test depends on the actual host OS version since it is merely calling into the
// native Darwin API.
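For reference, constructVersion above packs a version triple as major in the upper 16 bits and minor/subminor in one byte each, so e.g. 12.3.1 becomes 0x000C0301. A standalone sketch of the same packing rule, with a hypothetical helper name that is not part of the patch:

const std = @import("std");

// Same packing rule as constructVersion above: major in bits 16..31,
// minor in bits 8..15, subminor in bits 0..7.
fn packVersion(major: u32, minor: u32, subminor: u32) u32 {
    return ((major & 0xffff) << 16) | ((minor & 0xff) << 8) | (subminor & 0xff);
}

test "version packing" {
    try std.testing.expectEqual(@as(u32, 0x000C0301), packVersion(12, 3, 1));
    try std.testing.expectEqual(@as(u32, 0x000A0F00), packVersion(10, 15, 0));
}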
diff --git a/lib/compiler_rt/parity.zig b/lib/compiler_rt/parity.zig
index ae634b0790..2f48a38bff 100644
--- a/lib/compiler_rt/parity.zig
+++ b/lib/compiler_rt/parity.zig
@@ -1,12 +1,31 @@
+//! parity - 0 if the number of set bits is even, else 1
+//! - parityXi2_generic for big and little endian
+
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
-// parity - if number of bits set is even => 0, else => 1
-// - pariytXi2_generic for big and little endian
+pub const panic = common.panic;
-inline fn parityXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
+comptime {
+ @export(__paritysi2, .{ .name = "__paritysi2", .linkage = common.linkage });
+ @export(__paritydi2, .{ .name = "__paritydi2", .linkage = common.linkage });
+ @export(__parityti2, .{ .name = "__parityti2", .linkage = common.linkage });
+}
+
+pub fn __paritysi2(a: i32) callconv(.C) i32 {
+ return parityXi2(i32, a);
+}
+
+pub fn __paritydi2(a: i64) callconv(.C) i32 {
+ return parityXi2(i64, a);
+}
+
+pub fn __parityti2(a: i128) callconv(.C) i32 {
+ return parityXi2(i128, a);
+}
+inline fn parityXi2(comptime T: type, a: T) i32 {
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@@ -23,18 +42,6 @@ inline fn parityXi2(comptime T: type, a: T) i32 {
return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1
}
-pub fn __paritysi2(a: i32) callconv(.C) i32 {
- return parityXi2(i32, a);
-}
-
-pub fn __paritydi2(a: i64) callconv(.C) i32 {
- return parityXi2(i64, a);
-}
-
-pub fn __parityti2(a: i128) callconv(.C) i32 {
- return parityXi2(i128, a);
-}
-
test {
_ = @import("paritysi2_test.zig");
_ = @import("paritydi2_test.zig");
diff --git a/lib/compiler_rt/popcount.zig b/lib/compiler_rt/popcount.zig
index 362b232fb8..803e93f35a 100644
--- a/lib/compiler_rt/popcount.zig
+++ b/lib/compiler_rt/popcount.zig
@@ -1,17 +1,36 @@
+//! popcount - population count
+//! counts the number of 1 bits
+//! SWAR popcount: count bits per duo, aggregate into nibbles and then bytes
+//! inside the x-bit register in parallel, then sum up all bytes
+//! SWAR masks and factors can be defined as 2-adic fractions
+//! TAOCP: Combinatorial Algorithms, Bitwise Tricks And Techniques,
+//! subsubsections "Working with the rightmost bits" and "Sideways addition".
+
const builtin = @import("builtin");
const std = @import("std");
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__popcountsi2, .{ .name = "__popcountsi2", .linkage = common.linkage });
+ @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = common.linkage });
+ @export(__popcountti2, .{ .name = "__popcountti2", .linkage = common.linkage });
+}
-// popcount - population count
-// counts the number of 1 bits
+pub fn __popcountsi2(a: i32) callconv(.C) i32 {
+ return popcountXi2(i32, a);
+}
-// SWAR-Popcount: count bits of duos, aggregate to nibbles, and bytes inside
-// x-bit register in parallel to sum up all bytes
-// SWAR-Masks and factors can be defined as 2-adic fractions
-// TAOCP: Combinational Algorithms, Bitwise Tricks And Techniques,
-// subsubsection "Working with the rightmost bits" and "Sideways addition".
+pub fn __popcountdi2(a: i64) callconv(.C) i32 {
+ return popcountXi2(i64, a);
+}
+
+pub fn __popcountti2(a: i128) callconv(.C) i32 {
+ return popcountXi2(i128, a);
+}
inline fn popcountXi2(comptime ST: type, a: ST) i32 {
- @setRuntimeSafety(builtin.is_test);
const UT = switch (ST) {
i32 => u32,
i64 => u64,
@@ -30,18 +49,6 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 {
return @intCast(i32, x);
}
-pub fn __popcountsi2(a: i32) callconv(.C) i32 {
- return popcountXi2(i32, a);
-}
-
-pub fn __popcountdi2(a: i64) callconv(.C) i32 {
- return popcountXi2(i64, a);
-}
-
-pub fn __popcountti2(a: i128) callconv(.C) i32 {
- return popcountXi2(i128, a);
-}
-
test {
_ = @import("popcountsi2_test.zig");
_ = @import("popcountdi2_test.zig");
diff --git a/lib/compiler_rt/round.zig b/lib/compiler_rt/round.zig
index 4f3266e00c..acd26d8823 100644
--- a/lib/compiler_rt/round.zig
+++ b/lib/compiler_rt/round.zig
@@ -1,12 +1,27 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/roundf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/round.c
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/roundf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/round.c
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const expect = std.testing.expect;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__roundh, .{ .name = "__roundh", .linkage = common.linkage });
+ @export(roundf, .{ .name = "roundf", .linkage = common.linkage });
+ @export(round, .{ .name = "round", .linkage = common.linkage });
+ @export(__roundx, .{ .name = "__roundx", .linkage = common.linkage });
+ const roundq_sym_name = if (common.want_ppc_abi) "roundf128" else "roundq";
+ @export(roundq, .{ .name = roundq_sym_name, .linkage = common.linkage });
+ @export(roundl, .{ .name = "roundl", .linkage = common.linkage });
+}
pub fn __roundh(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/shift.zig b/lib/compiler_rt/shift.zig
index edcf246daf..ee8b634fbb 100644
--- a/lib/compiler_rt/shift.zig
+++ b/lib/compiler_rt/shift.zig
@@ -1,13 +1,33 @@
const std = @import("std");
+const builtin = @import("builtin");
const Log2Int = std.math.Log2Int;
-const native_endian = @import("builtin").cpu.arch.endian();
+const native_endian = builtin.cpu.arch.endian();
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__ashlti3, .{ .name = "__ashlti3", .linkage = common.linkage });
+ @export(__ashrti3, .{ .name = "__ashrti3", .linkage = common.linkage });
+ @export(__lshrti3, .{ .name = "__lshrti3", .linkage = common.linkage });
+
+ if (common.want_aeabi) {
+ @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = common.linkage });
+ @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = common.linkage });
+ @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = common.linkage });
+ } else {
+ @export(__ashldi3, .{ .name = "__ashldi3", .linkage = common.linkage });
+ @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = common.linkage });
+ @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = common.linkage });
+ }
+}
fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {
- pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
- pub const HalfTU = std.meta.Int(.unsigned, bits);
- pub const HalfTS = std.meta.Int(.signed, bits);
- pub const HalfT = if (signed_half) HalfTS else HalfTU;
+ const bits = @divExact(@typeInfo(T).Int.bits, 2);
+ const HalfTU = std.meta.Int(.unsigned, bits);
+ const HalfTS = std.meta.Int(.signed, bits);
+ const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
s: if (native_endian == .Little)
@@ -19,7 +39,7 @@ fn Dwords(comptime T: type, comptime signed_half: bool) type {
// Arithmetic shift left
// Precondition: 0 <= b < bits_in_dword
-pub inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
+inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@@ -42,7 +62,7 @@ pub inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
// Arithmetic shift right
// Precondition: 0 <= b < T.bit_count
-pub inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
+inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, true);
const S = Log2Int(dwords.HalfT);
@@ -69,7 +89,7 @@ pub inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
// Logical shift right
// Precondition: 0 <= b < T.bit_count
-pub inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
+inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@@ -93,30 +113,34 @@ pub inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
pub fn __ashldi3(a: i64, b: i32) callconv(.C) i64 {
return ashlXi3(i64, a, b);
}
+fn __aeabi_llsl(a: i64, b: i32) callconv(.AAPCS) i64 {
+ return ashlXi3(i64, a, b);
+}
+
pub fn __ashlti3(a: i128, b: i32) callconv(.C) i128 {
return ashlXi3(i128, a, b);
}
+
pub fn __ashrdi3(a: i64, b: i32) callconv(.C) i64 {
return ashrXi3(i64, a, b);
}
+fn __aeabi_lasr(a: i64, b: i32) callconv(.AAPCS) i64 {
+ return ashrXi3(i64, a, b);
+}
+
pub fn __ashrti3(a: i128, b: i32) callconv(.C) i128 {
return ashrXi3(i128, a, b);
}
+
pub fn __lshrdi3(a: i64, b: i32) callconv(.C) i64 {
return lshrXi3(i64, a, b);
}
-pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
- return lshrXi3(i128, a, b);
+fn __aeabi_llsr(a: i64, b: i32) callconv(.AAPCS) i64 {
+ return lshrXi3(i64, a, b);
}
-pub fn __aeabi_llsl(a: i64, b: i32) callconv(.AAPCS) i64 {
- return ashlXi3(i64, a, b);
-}
-pub fn __aeabi_lasr(a: i64, b: i32) callconv(.AAPCS) i64 {
- return ashrXi3(i64, a, b);
-}
-pub fn __aeabi_llsr(a: i64, b: i32) callconv(.AAPCS) i64 {
- return lshrXi3(i64, a, b);
+pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
+ return lshrXi3(i128, a, b);
}
test {
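For context, the double-word shifts above (ashlXi3 and friends, whose bodies this hunk leaves unchanged) split the operand into two half-words and treat shift counts below and at/above the half width separately. A minimal standalone sketch of that idea for a 64-bit logical left shift built from 32-bit halves, using a hypothetical helper and not the library routine:

const std = @import("std");

// Shift a u64 left by 0..63 bits using only 32-bit halves: small shifts
// carry bits across the half-word boundary, large shifts move the low
// half into the high half.
fn shl64(a: u64, b: u6) u64 {
    const lo = @truncate(u32, a);
    const hi = @truncate(u32, a >> 32);
    var out_lo: u32 = undefined;
    var out_hi: u32 = undefined;
    if (b >= 32) {
        out_lo = 0;
        out_hi = lo << @intCast(u5, b - 32);
    } else if (b == 0) {
        out_lo = lo;
        out_hi = hi;
    } else {
        out_lo = lo << @intCast(u5, b);
        out_hi = (hi << @intCast(u5, b)) | (lo >> @intCast(u5, 32 - @as(u32, b)));
    }
    return (@as(u64, out_hi) << 32) | out_lo;
}

test "shl64" {
    try std.testing.expectEqual(@as(u64, 1) << 40, shl64(1, 40));
    try std.testing.expectEqual(@as(u64, 0xdead) << 7, shl64(0xdead, 7));
}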
diff --git a/lib/compiler_rt/sin.zig b/lib/compiler_rt/sin.zig
index 20259bc309..1b93aab948 100644
--- a/lib/compiler_rt/sin.zig
+++ b/lib/compiler_rt/sin.zig
@@ -1,17 +1,32 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/sinf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/sin.c
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/sinf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/sin.c
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
const trig = @import("trig.zig");
const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
+pub const panic = common.panic;
+
+comptime {
+ @export(__sinh, .{ .name = "__sinh", .linkage = common.linkage });
+ @export(sinf, .{ .name = "sinf", .linkage = common.linkage });
+ @export(sin, .{ .name = "sin", .linkage = common.linkage });
+ @export(__sinx, .{ .name = "__sinx", .linkage = common.linkage });
+ const sinq_sym_name = if (common.want_ppc_abi) "sinf128" else "sinq";
+ @export(sinq, .{ .name = sinq_sym_name, .linkage = common.linkage });
+ @export(sinl, .{ .name = "sinl", .linkage = common.linkage });
+}
+
pub fn __sinh(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
return @floatCast(f16, sinf(x));
diff --git a/lib/compiler_rt/sincos.zig b/lib/compiler_rt/sincos.zig
index 8bc5b83ee5..c839356a36 100644
--- a/lib/compiler_rt/sincos.zig
+++ b/lib/compiler_rt/sincos.zig
@@ -1,10 +1,23 @@
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
-const sin = @import("sin.zig");
-const cos = @import("cos.zig");
const trig = @import("trig.zig");
const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__sincosh, .{ .name = "__sincosh", .linkage = common.linkage });
+ @export(sincosf, .{ .name = "sincosf", .linkage = common.linkage });
+ @export(sincos, .{ .name = "sincos", .linkage = common.linkage });
+ @export(__sincosx, .{ .name = "__sincosx", .linkage = common.linkage });
+ const sincosq_sym_name = if (common.want_ppc_abi) "sincosf128" else "sincosq";
+ @export(sincosq, .{ .name = sincosq_sym_name, .linkage = common.linkage });
+ @export(sincosl, .{ .name = "sincosl", .linkage = common.linkage });
+}
pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void {
// TODO: more efficient implementation
@@ -192,7 +205,7 @@ pub fn sincosl(x: c_longdouble, r_sin: *c_longdouble, r_cos: *c_longdouble) call
}
}
-const rem_pio2_generic = @compileError("TODO");
+pub const rem_pio2_generic = @compileError("TODO");
/// Ported from musl sincosl.c. Needs the following dependencies to be complete:
/// * rem_pio2_generic ported from __rem_pio2l.c
diff --git a/lib/compiler_rt/sparc.zig b/lib/compiler_rt/sparc.zig
deleted file mode 100644
index 3b33afd29a..0000000000
--- a/lib/compiler_rt/sparc.zig
+++ /dev/null
@@ -1,114 +0,0 @@
-//
-// SPARC uses a different naming scheme for its support routines so we map it here to the x86 name.
-
-const std = @import("std");
-const builtin = @import("builtin");
-
-// The SPARC Architecture Manual, Version 9:
-// A.13 Floating-Point Compare
-const FCMP = enum(i32) {
- Equal = 0,
- Less = 1,
- Greater = 2,
- Unordered = 3,
-};
-
-// Basic arithmetic
-
-pub fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.C) void {
- c.* = @import("addXf3.zig").__addtf3(a.*, b.*);
-}
-
-pub fn _Qp_div(c: *f128, a: *f128, b: *f128) callconv(.C) void {
- c.* = @import("divtf3.zig").__divtf3(a.*, b.*);
-}
-
-pub fn _Qp_mul(c: *f128, a: *f128, b: *f128) callconv(.C) void {
- c.* = @import("mulXf3.zig").__multf3(a.*, b.*);
-}
-
-pub fn _Qp_sub(c: *f128, a: *f128, b: *f128) callconv(.C) void {
- c.* = @import("addXf3.zig").__subtf3(a.*, b.*);
-}
-
-// Comparison
-
-pub fn _Qp_cmp(a: *f128, b: *f128) callconv(.C) i32 {
- return @enumToInt(@import("compareXf2.zig").cmp(f128, FCMP, a.*, b.*));
-}
-
-pub fn _Qp_feq(a: *f128, b: *f128) callconv(.C) bool {
- return _Qp_cmp(a, b) == @enumToInt(FCMP.Equal);
-}
-
-pub fn _Qp_fne(a: *f128, b: *f128) callconv(.C) bool {
- return _Qp_cmp(a, b) != @enumToInt(FCMP.Equal);
-}
-
-pub fn _Qp_flt(a: *f128, b: *f128) callconv(.C) bool {
- return _Qp_cmp(a, b) == @enumToInt(FCMP.Less);
-}
-
-pub fn _Qp_fle(a: *f128, b: *f128) callconv(.C) bool {
- const cmp = _Qp_cmp(a, b);
- return cmp == @enumToInt(FCMP.Less) or cmp == @enumToInt(FCMP.Equal);
-}
-
-pub fn _Qp_fgt(a: *f128, b: *f128) callconv(.C) bool {
- return _Qp_cmp(a, b) == @enumToInt(FCMP.Greater);
-}
-
-pub fn _Qp_fge(a: *f128, b: *f128) callconv(.C) bool {
- const cmp = _Qp_cmp(a, b);
- return cmp == @enumToInt(FCMP.Greater) or cmp == @enumToInt(FCMP.Equal);
-}
-
-// Conversion
-
-pub fn _Qp_itoq(c: *f128, a: i32) callconv(.C) void {
- c.* = @import("floatXiYf.zig").__floatsitf(a);
-}
-
-pub fn _Qp_uitoq(c: *f128, a: u32) callconv(.C) void {
- c.* = @import("floatXiYf.zig").__floatunsitf(a);
-}
-
-pub fn _Qp_xtoq(c: *f128, a: i64) callconv(.C) void {
- c.* = @import("floatXiYf.zig").__floatditf(a);
-}
-
-pub fn _Qp_uxtoq(c: *f128, a: u64) callconv(.C) void {
- c.* = @import("floatXiYf.zig").__floatunditf(a);
-}
-
-pub fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
- c.* = @import("extendXfYf2.zig").__extendsftf2(a);
-}
-
-pub fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
- c.* = @import("extendXfYf2.zig").__extenddftf2(a);
-}
-
-pub fn _Qp_qtoi(a: *f128) callconv(.C) i32 {
- return @import("fixXfYi.zig").__fixtfsi(a.*);
-}
-
-pub fn _Qp_qtoui(a: *f128) callconv(.C) u32 {
- return @import("fixXfYi.zig").__fixunstfsi(a.*);
-}
-
-pub fn _Qp_qtox(a: *f128) callconv(.C) i64 {
- return @import("fixXfYi.zig").__fixtfdi(a.*);
-}
-
-pub fn _Qp_qtoux(a: *f128) callconv(.C) u64 {
- return @import("fixXfYi.zig").__fixunstfdi(a.*);
-}
-
-pub fn _Qp_qtos(a: *f128) callconv(.C) f32 {
- return @import("truncXfYf2.zig").__trunctfsf2(a.*);
-}
-
-pub fn _Qp_qtod(a: *f128) callconv(.C) f64 {
- return @import("truncXfYf2.zig").__trunctfdf2(a.*);
-}
diff --git a/lib/compiler_rt/sqrt.zig b/lib/compiler_rt/sqrt.zig
index 8d43949f99..01b09213fe 100644
--- a/lib/compiler_rt/sqrt.zig
+++ b/lib/compiler_rt/sqrt.zig
@@ -1,5 +1,20 @@
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__sqrth, .{ .name = "__sqrth", .linkage = common.linkage });
+ @export(sqrtf, .{ .name = "sqrtf", .linkage = common.linkage });
+ @export(sqrt, .{ .name = "sqrt", .linkage = common.linkage });
+ @export(__sqrtx, .{ .name = "__sqrtx", .linkage = common.linkage });
+ const sqrtq_sym_name = if (common.want_ppc_abi) "sqrtf128" else "sqrtq";
+ @export(sqrtq, .{ .name = sqrtq_sym_name, .linkage = common.linkage });
+ @export(sqrtl, .{ .name = "sqrtl", .linkage = common.linkage });
+}
pub fn __sqrth(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/stack_probe.zig b/lib/compiler_rt/stack_probe.zig
index 90919dcbb8..5ebb851825 100644
--- a/lib/compiler_rt/stack_probe.zig
+++ b/lib/compiler_rt/stack_probe.zig
@@ -1,4 +1,43 @@
-const native_arch = @import("builtin").cpu.arch;
+const std = @import("std");
+const builtin = @import("builtin");
+const os_tag = builtin.os.tag;
+const arch = builtin.cpu.arch;
+const abi = builtin.abi;
+const is_test = builtin.is_test;
+
+const is_gnu = abi.isGnu();
+const is_mingw = os_tag == .windows and is_gnu;
+
+const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+const strong_linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Strong;
+pub const panic = @import("common.zig").panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ // Default stack-probe functions emitted by LLVM
+ if (is_mingw) {
+ @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
+ @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
+ } else if (!builtin.link_libc) {
+ // These symbols are otherwise exported by MSVCRT.lib
+ @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ }
+
+ if (arch.isAARCH64()) {
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ }
+ }
+
+ switch (arch) {
+ .i386,
+ .x86_64,
+ => {
+ @export(zig_probe_stack, .{ .name = "__zig_probe_stack", .linkage = linkage });
+ },
+ else => {},
+ }
+}
// Zig's own stack-probe routine (available only on x86 and x86_64)
pub fn zig_probe_stack() callconv(.Naked) void {
@@ -8,7 +47,7 @@ pub fn zig_probe_stack() callconv(.Naked) void {
// invalid so let's update it on the go, otherwise we'll get a segfault
// instead of triggering the stack growth.
- switch (native_arch) {
+ switch (arch) {
.x86_64 => {
// %rax = probe length, %rsp = stack pointer
asm volatile (
@@ -60,7 +99,7 @@ pub fn zig_probe_stack() callconv(.Naked) void {
fn win_probe_stack_only() void {
@setRuntimeSafety(false);
- switch (native_arch) {
+ switch (arch) {
.x86_64 => {
asm volatile (
\\ push %%rcx
@@ -105,7 +144,7 @@ fn win_probe_stack_only() void {
},
else => {},
}
- if (comptime native_arch.isAARCH64()) {
+ if (comptime arch.isAARCH64()) {
// NOTE: page size hardcoded to 4096 for now
asm volatile (
\\ lsl x16, x15, #4
@@ -127,7 +166,7 @@ fn win_probe_stack_only() void {
fn win_probe_stack_adjust_sp() void {
@setRuntimeSafety(false);
- switch (native_arch) {
+ switch (arch) {
.x86_64 => {
asm volatile (
\\ push %%rcx
@@ -201,9 +240,9 @@ pub fn _chkstk() callconv(.Naked) void {
}
pub fn __chkstk() callconv(.Naked) void {
@setRuntimeSafety(false);
- if (comptime native_arch.isAARCH64()) {
+ if (comptime arch.isAARCH64()) {
@call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
- } else switch (native_arch) {
+ } else switch (arch) {
.i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
.x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
else => unreachable,
diff --git a/lib/compiler_rt/subdf3.zig b/lib/compiler_rt/subdf3.zig
new file mode 100644
index 0000000000..9d62ffe480
--- /dev/null
+++ b/lib/compiler_rt/subdf3.zig
@@ -0,0 +1,21 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = common.linkage });
+ } else {
+ @export(__subdf3, .{ .name = "__subdf3", .linkage = common.linkage });
+ }
+}
+
+fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
+ const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+ return a + neg_b;
+}
+
+fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
+ const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+ return a + neg_b;
+}
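The new sub*3 files implement subtraction as a + (-b), where IEEE-754 negation is just flipping the sign bit, exactly as the XOR above does. A standalone check of that identity, illustrative only:

const std = @import("std");

// IEEE-754 negation is a pure bit operation: XOR the sign bit.
// Subtraction then reduces to addition, as in __subsf3/__subdf3 above.
fn negf64(b: f64) f64 {
    return @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
}

test "a - b == a + (-b)" {
    const a: f64 = 1.5;
    const b: f64 = -2.25;
    try std.testing.expectEqual(a - b, a + negf64(b));
    try std.testing.expect(std.math.signbit(negf64(0.0)));
}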
diff --git a/lib/compiler_rt/subo.zig b/lib/compiler_rt/subo.zig
index af28c6eead..a7dcf258aa 100644
--- a/lib/compiler_rt/subo.zig
+++ b/lib/compiler_rt/subo.zig
@@ -1,12 +1,31 @@
+//! subo - subtract overflow
+//! * return a -% b
+//! * store 1 in overflow if a - b overflows, else 0
+//! - suboXi4_generic as default
+
+const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
+
+pub const panic = common.panic;
-// subo - subtract overflow
-// * return a-%b.
-// * return if a-b overflows => 1 else => 0
-// - suboXi4_generic as default
+comptime {
+ @export(__subosi4, .{ .name = "__subosi4", .linkage = common.linkage });
+ @export(__subodi4, .{ .name = "__subodi4", .linkage = common.linkage });
+ @export(__suboti4, .{ .name = "__suboti4", .linkage = common.linkage });
+}
+
+pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+ return suboXi4_generic(i32, a, b, overflow);
+}
+pub fn __subodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+ return suboXi4_generic(i64, a, b, overflow);
+}
+pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+ return suboXi4_generic(i128, a, b, overflow);
+}
inline fn suboXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
- @setRuntimeSafety(builtin.is_test);
overflow.* = 0;
var sum: ST = a -% b;
// Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
@@ -21,16 +40,6 @@ inline fn suboXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST
return sum;
}
-pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
- return suboXi4_generic(i32, a, b, overflow);
-}
-pub fn __subodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
- return suboXi4_generic(i64, a, b, overflow);
-}
-pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
- return suboXi4_generic(i128, a, b, overflow);
-}
-
test {
_ = @import("subosi4_test.zig");
_ = @import("subodi4_test.zig");
diff --git a/lib/compiler_rt/subsf3.zig b/lib/compiler_rt/subsf3.zig
new file mode 100644
index 0000000000..472bccc899
--- /dev/null
+++ b/lib/compiler_rt/subsf3.zig
@@ -0,0 +1,21 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = common.linkage });
+ } else {
+ @export(__subsf3, .{ .name = "__subsf3", .linkage = common.linkage });
+ }
+}
+
+fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
+ const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
+ return a + neg_b;
+}
+
+fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
+ const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
+ return a + neg_b;
+}
diff --git a/lib/compiler_rt/subtf3.zig b/lib/compiler_rt/subtf3.zig
new file mode 100644
index 0000000000..9477f96917
--- /dev/null
+++ b/lib/compiler_rt/subtf3.zig
@@ -0,0 +1,30 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__subkf3, .{ .name = "__subkf3", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = common.linkage });
+ } else {
+ @export(__subtf3, .{ .name = "__subtf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
+ return sub(a, b);
+}
+
+fn __subkf3(a: f128, b: f128) callconv(.C) f128 {
+ return sub(a, b);
+}
+
+fn _Qp_sub(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+ c.* = sub(a.*, b.*);
+}
+
+inline fn sub(a: f128, b: f128) f128 {
+ const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
+ return a + neg_b;
+}
diff --git a/lib/compiler_rt/subxf3.zig b/lib/compiler_rt/subxf3.zig
new file mode 100644
index 0000000000..a143f10ffe
--- /dev/null
+++ b/lib/compiler_rt/subxf3.zig
@@ -0,0 +1,15 @@
+const std = @import("std");
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__subxf3, .{ .name = "__subxf3", .linkage = common.linkage });
+}
+
+fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
+ var b_rep = std.math.break_f80(b);
+ b_rep.exp ^= 0x8000;
+ const neg_b = std.math.make_f80(b_rep);
+ return a + neg_b;
+}
diff --git a/lib/compiler_rt/tan.zig b/lib/compiler_rt/tan.zig
index d37022d918..9c44e4c682 100644
--- a/lib/compiler_rt/tan.zig
+++ b/lib/compiler_rt/tan.zig
@@ -1,11 +1,12 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/tanf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/tan.c
-// https://golang.org/src/math/tan.go
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/tanf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/tan.c
+//! https://golang.org/src/math/tan.go
const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const expect = std.testing.expect;
@@ -13,6 +14,21 @@ const kernel = @import("trig.zig");
const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__tanh, .{ .name = "__tanh", .linkage = common.linkage });
+ @export(tanf, .{ .name = "tanf", .linkage = common.linkage });
+ @export(tan, .{ .name = "tan", .linkage = common.linkage });
+ @export(__tanx, .{ .name = "__tanx", .linkage = common.linkage });
+ const tanq_sym_name = if (common.want_ppc_abi) "tanf128" else "tanq";
+ @export(tanq, .{ .name = tanq_sym_name, .linkage = common.linkage });
+ @export(tanl, .{ .name = "tanl", .linkage = common.linkage });
+}
+
pub fn __tanh(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
return @floatCast(f16, tanf(x));
diff --git a/lib/compiler_rt/trunc.zig b/lib/compiler_rt/trunc.zig
index d00df60d99..9ced5bc92c 100644
--- a/lib/compiler_rt/trunc.zig
+++ b/lib/compiler_rt/trunc.zig
@@ -1,12 +1,27 @@
-// Ported from musl, which is licensed under the MIT license:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/truncf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/trunc.c
+//! Ported from musl, which is MIT licensed.
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/truncf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/trunc.c
const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__trunch, .{ .name = "__trunch", .linkage = common.linkage });
+ @export(truncf, .{ .name = "truncf", .linkage = common.linkage });
+ @export(trunc, .{ .name = "trunc", .linkage = common.linkage });
+ @export(__truncx, .{ .name = "__truncx", .linkage = common.linkage });
+ const truncq_sym_name = if (common.want_ppc_abi) "truncf128" else "truncq";
+ @export(truncq, .{ .name = truncq_sym_name, .linkage = common.linkage });
+ @export(truncl, .{ .name = "truncl", .linkage = common.linkage });
+}
pub fn __trunch(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
diff --git a/lib/compiler_rt/trunc_f80.zig b/lib/compiler_rt/trunc_f80.zig
deleted file mode 100644
index 107874aeeb..0000000000
--- a/lib/compiler_rt/trunc_f80.zig
+++ /dev/null
@@ -1,173 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const native_arch = builtin.cpu.arch;
-const testing = std.testing;
-
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
-
-pub fn __truncxfhf2(a: f80) callconv(.C) F16T {
- return @bitCast(F16T, trunc(f16, a));
-}
-
-pub fn __truncxfsf2(a: f80) callconv(.C) f32 {
- return trunc(f32, a);
-}
-
-pub fn __truncxfdf2(a: f80) callconv(.C) f64 {
- return trunc(f64, a);
-}
-
-inline fn trunc(comptime dst_t: type, a: f80) dst_t {
- @setRuntimeSafety(builtin.is_test);
-
- const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
- const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
- const dst_sig_bits = std.math.floatMantissaBits(dst_t);
-
- const src_exp_bias = 16383;
-
- const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
- const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
-
- const dst_bits = @typeInfo(dst_t).Float.bits;
- const dst_exp_bits = dst_bits - dst_sig_bits - 1;
- const dst_inf_exp = (1 << dst_exp_bits) - 1;
- const dst_exp_bias = dst_inf_exp >> 1;
-
- const underflow = src_exp_bias + 1 - dst_exp_bias;
- const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
-
- const dst_qnan = 1 << (dst_sig_bits - 1);
- const dst_nan_mask = dst_qnan - 1;
-
- // Break a into a sign and representation of the absolute value
- var a_rep = std.math.break_f80(a);
- const sign = a_rep.exp & 0x8000;
- a_rep.exp &= 0x7FFF;
- a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
- var abs_result: dst_rep_t = undefined;
-
- if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
- // The exponent of a is within the range of normal numbers in the
- // destination format. We can convert by simply right-shifting with
- // rounding and adjusting the exponent.
- abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
- abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits));
- abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
-
- const round_bits = a_rep.fraction & round_mask;
- if (round_bits > halfway) {
- // Round to nearest
- abs_result += 1;
- } else if (round_bits == halfway) {
- // Ties to even
- abs_result += abs_result & 1;
- }
- } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
- // a is NaN.
- // Conjure the result by beginning with infinity, setting the qNaN
- // bit and inserting the (truncated) trailing NaN field.
- abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
- abs_result |= dst_qnan;
- abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
- } else if (a_rep.exp >= overflow) {
- // a overflows to infinity.
- abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
- } else {
- // a underflows on conversion to the destination type or is an exact
- // zero. The result may be a denormal or zero. Extract the exponent
- // to get the shift amount for the denormalization.
- const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
-
- // Right shift by the denormalization amount with sticky.
- if (shift > src_sig_bits) {
- abs_result = 0;
- } else {
- const sticky = @boolToInt(a_rep.fraction << @intCast(u6, shift) != 0);
- const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky;
- abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits));
- const round_bits = denormalized_significand & round_mask;
- if (round_bits > halfway) {
- // Round to nearest
- abs_result += 1;
- } else if (round_bits == halfway) {
- // Ties to even
- abs_result += abs_result & 1;
- }
- }
- }
-
- const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
- return @bitCast(dst_t, result);
-}
-
-pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
- const src_sig_bits = std.math.floatMantissaBits(f128);
- const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
-
- // Various constants whose values follow from the type parameters.
- // Any reasonable optimizer will fold and propagate all of these.
- const src_bits = @typeInfo(f128).Float.bits;
- const src_exp_bits = src_bits - src_sig_bits - 1;
- const src_inf_exp = 0x7FFF;
-
- const src_inf = src_inf_exp << src_sig_bits;
- const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
- const src_abs_mask = src_sign_mask - 1;
- const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
- const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
-
- // Break a into a sign and representation of the absolute value
- const a_rep = @bitCast(u128, a);
- const a_abs = a_rep & src_abs_mask;
- const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0;
- const integer_bit = 1 << 63;
-
- var res: std.math.F80 = undefined;
-
- if (a_abs > src_inf) {
- // a is NaN.
- // Conjure the result by beginning with infinity, setting the qNaN
- // bit and inserting the (truncated) trailing NaN field.
- res.exp = 0x7fff;
- res.fraction = 0x8000000000000000;
- res.fraction |= @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits));
- } else {
- // The exponent of a is within the range of normal numbers in the
- // destination format. We can convert by simply right-shifting with
- // rounding, adding the explicit integer bit, and adjusting the exponent
- res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)) | integer_bit;
- res.exp = @truncate(u16, a_abs >> src_sig_bits);
-
- const round_bits = a_abs & round_mask;
- if (round_bits > halfway) {
- // Round to nearest
- const carry = @boolToInt(@addWithOverflow(u64, res.fraction, 1, &res.fraction));
- res.exp += carry;
- res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
- } else if (round_bits == halfway) {
- // Ties to even
- const carry = @boolToInt(@addWithOverflow(u64, res.fraction, res.fraction & 1, &res.fraction));
- res.exp += carry;
- res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
- }
- if (res.exp == 0) res.fraction &= ~@as(u64, integer_bit); // Remove integer bit for de-normals
- }
-
- res.exp |= sign;
- return std.math.make_f80(res);
-}
-
-fn test__trunctfxf2(a: f128, expected: f80) !void {
- const x = __trunctfxf2(a);
- try testing.expect(x == expected);
-}
-
-test {
- try test__trunctfxf2(1.5, 1.5);
- try test__trunctfxf2(2.5, 2.5);
- try test__trunctfxf2(-2.5, -2.5);
- try test__trunctfxf2(0.0, 0.0);
-}
diff --git a/lib/compiler_rt/truncdfhf2.zig b/lib/compiler_rt/truncdfhf2.zig
new file mode 100644
index 0000000000..a2d3bf1402
--- /dev/null
+++ b/lib/compiler_rt/truncdfhf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = common.linkage });
+ } else {
+ @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __truncdfhf2(a: f64) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f64, a));
+}
+
+fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 {
+ return @bitCast(common.F16T, truncf(f16, f64, a));
+}
diff --git a/lib/compiler_rt/truncdfsf2.zig b/lib/compiler_rt/truncdfsf2.zig
new file mode 100644
index 0000000000..126dfff0fd
--- /dev/null
+++ b/lib/compiler_rt/truncdfsf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = common.linkage });
+ } else {
+ @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __truncdfsf2(a: f64) callconv(.C) f32 {
+ return truncf(f32, f64, a);
+}
+
+fn __aeabi_d2f(a: f64) callconv(.AAPCS) f32 {
+ return truncf(f32, f64, a);
+}
diff --git a/lib/compiler_rt/truncXfYf2.zig b/lib/compiler_rt/truncf.zig
index bf324269a6..c012bcee62 100644
--- a/lib/compiler_rt/truncXfYf2.zig
+++ b/lib/compiler_rt/truncf.zig
@@ -1,53 +1,6 @@
const std = @import("std");
-const builtin = @import("builtin");
-const native_arch = builtin.cpu.arch;
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-// TODO remove this; do this type selection in the language rather than
-// here in compiler-rt.
-pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
-
-pub fn __truncsfhf2(a: f32) callconv(.C) F16T {
- return @bitCast(F16T, truncXfYf2(f16, f32, a));
-}
-
-pub fn __truncdfhf2(a: f64) callconv(.C) F16T {
- return @bitCast(F16T, truncXfYf2(f16, f64, a));
-}
-
-pub fn __trunctfhf2(a: f128) callconv(.C) F16T {
- return @bitCast(F16T, truncXfYf2(f16, f128, a));
-}
-
-pub fn __trunctfsf2(a: f128) callconv(.C) f32 {
- return truncXfYf2(f32, f128, a);
-}
-
-pub fn __trunctfdf2(a: f128) callconv(.C) f64 {
- return truncXfYf2(f64, f128, a);
-}
-
-pub fn __truncdfsf2(a: f64) callconv(.C) f32 {
- return truncXfYf2(f32, f64, a);
-}
-
-pub fn __aeabi_d2f(a: f64) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return truncXfYf2(f32, f64, a);
-}
-
-pub fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 {
- @setRuntimeSafety(false);
- return @bitCast(F16T, truncXfYf2(f16, f64, a));
-}
-
-pub fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
- @setRuntimeSafety(false);
- return @bitCast(F16T, truncXfYf2(f16, f32, a));
-}
-
-inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
+pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
@@ -147,6 +100,88 @@ inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
return @bitCast(dst_t, result);
}
+pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
+ const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
+ const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+ const dst_sig_bits = std.math.floatMantissaBits(dst_t);
+
+ const src_exp_bias = 16383;
+
+ const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
+ const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
+
+ const dst_bits = @typeInfo(dst_t).Float.bits;
+ const dst_exp_bits = dst_bits - dst_sig_bits - 1;
+ const dst_inf_exp = (1 << dst_exp_bits) - 1;
+ const dst_exp_bias = dst_inf_exp >> 1;
+
+ const underflow = src_exp_bias + 1 - dst_exp_bias;
+ const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
+
+ const dst_qnan = 1 << (dst_sig_bits - 1);
+ const dst_nan_mask = dst_qnan - 1;
+
+ // Break a into a sign and representation of the absolute value
+ var a_rep = std.math.break_f80(a);
+ const sign = a_rep.exp & 0x8000;
+ a_rep.exp &= 0x7FFF;
+ a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
+ var abs_result: dst_rep_t = undefined;
+
+ if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
+ abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits));
+ abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
+
+ const round_bits = a_rep.fraction & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ abs_result += 1;
+ } else if (round_bits == halfway) {
+ // Ties to even
+ abs_result += abs_result & 1;
+ }
+ } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+ abs_result |= dst_qnan;
+ abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
+ } else if (a_rep.exp >= overflow) {
+ // a overflows to infinity.
+ abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+ } else {
+ // a underflows on conversion to the destination type or is an exact
+ // zero. The result may be a denormal or zero. Extract the exponent
+ // to get the shift amount for the denormalization.
+ const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
+
+ // Right shift by the denormalization amount with sticky.
+ if (shift > src_sig_bits) {
+ abs_result = 0;
+ } else {
+ const sticky = @boolToInt(a_rep.fraction << @intCast(u6, shift) != 0);
+ const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky;
+ abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits));
+ const round_bits = denormalized_significand & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ abs_result += 1;
+ } else if (round_bits == halfway) {
+ // Ties to even
+ abs_result += abs_result & 1;
+ }
+ }
+ }
+
+ const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
+ return @bitCast(dst_t, result);
+}
+
test {
- _ = @import("truncXfYf2_test.zig");
+ _ = @import("truncf_test.zig");
}
diff --git a/lib/compiler_rt/truncXfYf2_test.zig b/lib/compiler_rt/truncf_test.zig
index 3f11dd0380..d4e93cd114 100644
--- a/lib/compiler_rt/truncXfYf2_test.zig
+++ b/lib/compiler_rt/truncf_test.zig
@@ -1,5 +1,13 @@
const std = @import("std");
-const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
+const testing = std.testing;
+
+const __truncsfhf2 = @import("truncsfhf2.zig").__truncsfhf2;
+const __truncdfhf2 = @import("truncdfhf2.zig").__truncdfhf2;
+const __truncdfsf2 = @import("truncdfsf2.zig").__truncdfsf2;
+const __trunctfhf2 = @import("trunctfhf2.zig").__trunctfhf2;
+const __trunctfsf2 = @import("trunctfsf2.zig").__trunctfsf2;
+const __trunctfdf2 = @import("trunctfdf2.zig").__trunctfdf2;
+const __trunctfxf2 = @import("trunctfxf2.zig").__trunctfxf2;
fn test__truncsfhf2(a: u32, expected: u16) !void {
const actual = @bitCast(u16, __truncsfhf2(@bitCast(f32, a)));
@@ -64,8 +72,6 @@ test "truncsfhf2" {
try test__truncsfhf2(0x33000000, 0x0000); // 0x1.0p-25 -> zero
}
-const __truncdfhf2 = @import("truncXfYf2.zig").__truncdfhf2;
-
fn test__truncdfhf2(a: f64, expected: u16) void {
const rep = @bitCast(u16, __truncdfhf2(a));
@@ -132,8 +138,6 @@ test "truncdfhf2" {
test__truncdfhf2(65536.0, 0x7c00);
}
-const __trunctfsf2 = @import("truncXfYf2.zig").__trunctfsf2;
-
fn test__trunctfsf2(a: f128, expected: u32) void {
const x = __trunctfsf2(a);
@@ -167,8 +171,6 @@ test "trunctfsf2" {
test__trunctfsf2(0x1.edcba9bb8c76a5a43dd21f334634p-435, 0x0);
}
-const __trunctfdf2 = @import("truncXfYf2.zig").__trunctfdf2;
-
fn test__trunctfdf2(a: f128, expected: u64) void {
const x = __trunctfdf2(a);
@@ -202,8 +204,6 @@ test "trunctfdf2" {
test__trunctfdf2(0x1.edcbff8ad76ab5bf46463233214fp-435, 0x24cedcbff8ad76ab);
}
-const __truncdfsf2 = @import("truncXfYf2.zig").__truncdfsf2;
-
fn test__truncdfsf2(a: f64, expected: u32) void {
const x = __truncdfsf2(a);
@@ -239,8 +239,6 @@ test "truncdfsf2" {
test__truncdfsf2(340282366920938463463374607431768211456.0, 0x7f800000);
}
-const __trunctfhf2 = @import("truncXfYf2.zig").__trunctfhf2;
-
fn test__trunctfhf2(a: f128, expected: u16) void {
const x = __trunctfhf2(a);
@@ -294,3 +292,15 @@ test "trunctfhf2" {
test__trunctfhf2(0x1.234eebb5faa678f4488693abcdefp+453, 0x7c00);
test__trunctfhf2(0x1.edcba9bb8c76a5a43dd21f334634p-43, 0x0);
}
+
+test "trunctfxf2" {
+ try test__trunctfxf2(1.5, 1.5);
+ try test__trunctfxf2(2.5, 2.5);
+ try test__trunctfxf2(-2.5, -2.5);
+ try test__trunctfxf2(0.0, 0.0);
+}
+
+fn test__trunctfxf2(a: f128, expected: f80) !void {
+ const x = __trunctfxf2(a);
+ try testing.expect(x == expected);
+}
diff --git a/lib/compiler_rt/truncsfhf2.zig b/lib/compiler_rt/truncsfhf2.zig
new file mode 100644
index 0000000000..489fb8658d
--- /dev/null
+++ b/lib/compiler_rt/truncsfhf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.gnu_f16_abi) {
+ @export(__gnu_f2h_ieee, .{ .name = "__gnu_f2h_ieee", .linkage = common.linkage });
+ } else if (common.want_aeabi) {
+ @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = common.linkage });
+ } else {
+ @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __truncsfhf2(a: f32) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f32, a));
+}
+
+fn __gnu_f2h_ieee(a: f32) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f32, a));
+}
+
+fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
+ return @bitCast(common.F16T, truncf(f16, f32, a));
+}
diff --git a/lib/compiler_rt/trunctfdf2.zig b/lib/compiler_rt/trunctfdf2.zig
new file mode 100644
index 0000000000..e084d63d88
--- /dev/null
+++ b/lib/compiler_rt/trunctfdf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__trunckfdf2, .{ .name = "__trunckfdf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = common.linkage });
+ } else {
+ @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __trunctfdf2(a: f128) callconv(.C) f64 {
+ return truncf(f64, f128, a);
+}
+
+fn __trunckfdf2(a: f128) callconv(.C) f64 {
+ return truncf(f64, f128, a);
+}
+
+fn _Qp_qtod(a: *const f128) callconv(.C) f64 {
+ return truncf(f64, f128, a.*);
+}
diff --git a/lib/compiler_rt/trunctfhf2.zig b/lib/compiler_rt/trunctfhf2.zig
new file mode 100644
index 0000000000..b764a78455
--- /dev/null
+++ b/lib/compiler_rt/trunctfhf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = common.linkage });
+}
+
+pub fn __trunctfhf2(a: f128) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f128, a));
+}
diff --git a/lib/compiler_rt/trunctfsf2.zig b/lib/compiler_rt/trunctfsf2.zig
new file mode 100644
index 0000000000..0fcd5e1e08
--- /dev/null
+++ b/lib/compiler_rt/trunctfsf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__trunckfsf2, .{ .name = "__trunckfsf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = common.linkage });
+ } else {
+ @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __trunctfsf2(a: f128) callconv(.C) f32 {
+ return truncf(f32, f128, a);
+}
+
+fn __trunckfsf2(a: f128) callconv(.C) f32 {
+ return truncf(f32, f128, a);
+}
+
+fn _Qp_qtos(a: *const f128) callconv(.C) f32 {
+ return truncf(f32, f128, a.*);
+}
diff --git a/lib/compiler_rt/trunctfxf2.zig b/lib/compiler_rt/trunctfxf2.zig
new file mode 100644
index 0000000000..731f58f192
--- /dev/null
+++ b/lib/compiler_rt/trunctfxf2.zig
@@ -0,0 +1,66 @@
+const math = @import("std").math;
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = common.linkage });
+}
+
+pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
+ const src_sig_bits = math.floatMantissaBits(f128);
+ const dst_sig_bits = math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const src_bits = @typeInfo(f128).Float.bits;
+ const src_exp_bits = src_bits - src_sig_bits - 1;
+ const src_inf_exp = 0x7FFF;
+
+ const src_inf = src_inf_exp << src_sig_bits;
+ const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
+ const src_abs_mask = src_sign_mask - 1;
+ const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
+ const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
+
+ // Break a into a sign and representation of the absolute value
+ const a_rep = @bitCast(u128, a);
+ const a_abs = a_rep & src_abs_mask;
+ const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0;
+ const integer_bit = 1 << 63;
+
+ var res: math.F80 = undefined;
+
+ if (a_abs > src_inf) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ res.exp = 0x7fff;
+ res.fraction = 0x8000000000000000;
+ res.fraction |= @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits));
+ } else {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding, adding the explicit integer bit, and adjusting the exponent
+ res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)) | integer_bit;
+ res.exp = @truncate(u16, a_abs >> src_sig_bits);
+
+ const round_bits = a_abs & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ const carry = @boolToInt(@addWithOverflow(u64, res.fraction, 1, &res.fraction));
+ res.exp += carry;
+ res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
+ } else if (round_bits == halfway) {
+ // Ties to even
+ const carry = @boolToInt(@addWithOverflow(u64, res.fraction, res.fraction & 1, &res.fraction));
+ res.exp += carry;
+ res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
+ }
+ if (res.exp == 0) res.fraction &= ~@as(u64, integer_bit); // Remove integer bit for de-normals
+ }
+
+ res.exp |= sign;
+ return math.make_f80(res);
+}
diff --git a/lib/compiler_rt/truncxfdf2.zig b/lib/compiler_rt/truncxfdf2.zig
new file mode 100644
index 0000000000..2b8eaaab8c
--- /dev/null
+++ b/lib/compiler_rt/truncxfdf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = common.linkage });
+}
+
+fn __truncxfdf2(a: f80) callconv(.C) f64 {
+ return trunc_f80(f64, a);
+}
diff --git a/lib/compiler_rt/truncxfhf2.zig b/lib/compiler_rt/truncxfhf2.zig
new file mode 100644
index 0000000000..75fdd17841
--- /dev/null
+++ b/lib/compiler_rt/truncxfhf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = common.linkage });
+}
+
+fn __truncxfhf2(a: f80) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, trunc_f80(f16, a));
+}
diff --git a/lib/compiler_rt/truncxfsf2.zig b/lib/compiler_rt/truncxfsf2.zig
new file mode 100644
index 0000000000..57c0cb7bdf
--- /dev/null
+++ b/lib/compiler_rt/truncxfsf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__truncxfsf2, .{ .name = "__truncxfsf2", .linkage = common.linkage });
+}
+
+fn __truncxfsf2(a: f80) callconv(.C) f32 {
+ return trunc_f80(f32, a);
+}
diff --git a/lib/compiler_rt/udivmodti4.zig b/lib/compiler_rt/udivmodti4.zig
index be9ed8237b..911bf72eed 100644
--- a/lib/compiler_rt/udivmodti4.zig
+++ b/lib/compiler_rt/udivmodti4.zig
@@ -1,15 +1,36 @@
-const udivmod = @import("udivmod.zig").udivmod;
+const std = @import("std");
const builtin = @import("builtin");
-const compiler_rt = @import("../compiler_rt.zig");
+const udivmod = @import("udivmod.zig").udivmod;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ } else {
+ @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage });
+ }
+}
pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.C) u128 {
- @setRuntimeSafety(builtin.is_test);
return udivmod(u128, a, b, maybe_rem);
}
-const v128 = @import("std").meta.Vector(2, u64);
-pub fn __udivmodti4_windows_x86_64(a: v128, b: v128, maybe_rem: ?*u128) callconv(.C) v128 {
- @setRuntimeSafety(builtin.is_test);
+const v128 = std.meta.Vector(2, u64);
+
+fn __udivmodti4_windows_x86_64(a: v128, b: v128, maybe_rem: ?*u128) callconv(.C) v128 {
return @bitCast(v128, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem));
}
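
Both exports now call the shared udivmod helper directly; on Windows x86_64 the 128-bit operands travel as Vector(2, u64) to match the ABI LLVM assumes for compiler-rt and are bitcast back to u128 internally. A short usage sketch of the portable entry point (hypothetical test, not part of this commit):

    const std = @import("std");
    const __udivmodti4 = @import("udivmodti4.zig").__udivmodti4;

    test "128-bit division returns the quotient and stores the remainder" {
        var rem: u128 = undefined;
        const q = __udivmodti4(1_000_003, 10, &rem);
        try std.testing.expectEqual(@as(u128, 100_000), q);
        try std.testing.expectEqual(@as(u128, 3), rem);
    }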
diff --git a/lib/compiler_rt/udivti3.zig b/lib/compiler_rt/udivti3.zig
index 52afa0420f..3e908176bc 100644
--- a/lib/compiler_rt/udivti3.zig
+++ b/lib/compiler_rt/udivti3.zig
@@ -1,13 +1,38 @@
-const udivmodti4 = @import("udivmodti4.zig");
+const std = @import("std");
const builtin = @import("builtin");
+const udivmod = @import("udivmod.zig").udivmod;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage });
+ }
+ } else {
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage });
+ }
+}
pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 {
- @setRuntimeSafety(builtin.is_test);
- return udivmodti4.__udivmodti4(a, b, null);
+ return udivmod(u128, a, b, null);
}
-const v128 = @import("std").meta.Vector(2, u64);
-pub fn __udivti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
- @setRuntimeSafety(builtin.is_test);
- return udivmodti4.__udivmodti4_windows_x86_64(a, b, null);
+const v128 = std.meta.Vector(2, u64);
+
+fn __udivti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ return @bitCast(v128, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null));
}
diff --git a/lib/compiler_rt/umodti3.zig b/lib/compiler_rt/umodti3.zig
index 29eb572892..65058a599e 100644
--- a/lib/compiler_rt/umodti3.zig
+++ b/lib/compiler_rt/umodti3.zig
@@ -1,18 +1,42 @@
-const udivmodti4 = @import("udivmodti4.zig");
+const std = @import("std");
const builtin = @import("builtin");
-const compiler_rt = @import("../compiler_rt.zig");
+const udivmod = @import("udivmod.zig").udivmod;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage });
+ }
+ } else {
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage });
+ }
+}
pub fn __umodti3(a: u128, b: u128) callconv(.C) u128 {
- @setRuntimeSafety(builtin.is_test);
var r: u128 = undefined;
- _ = udivmodti4.__udivmodti4(a, b, &r);
+ _ = udivmod(u128, a, b, &r);
return r;
}
-const v128 = @import("std").meta.Vector(2, u64);
-pub fn __umodti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
- return @bitCast(v128, @call(.{ .modifier = .always_inline }, __umodti3, .{
- @bitCast(u128, a),
- @bitCast(u128, b),
- }));
+const v128 = std.meta.Vector(2, u64);
+
+fn __umodti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ var r: u128 = undefined;
+ _ = udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), &r);
+ return @bitCast(v128, r);
}
diff --git a/lib/compiler_rt/unorddf2.zig b/lib/compiler_rt/unorddf2.zig
new file mode 100644
index 0000000000..66910a18bf
--- /dev/null
+++ b/lib/compiler_rt/unorddf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = common.linkage });
+ } else {
+ @export(__unorddf2, .{ .name = "__unorddf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __unorddf2(a: f64, b: f64) callconv(.C) i32 {
+ return comparef.unordcmp(f64, a, b);
+}
+
+fn __aeabi_dcmpun(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return comparef.unordcmp(f64, a, b);
+}
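
unordcmp reports whether the pair is unordered, i.e. whether either operand is NaN, so __unorddf2 (and the AEABI alias) returns nonzero exactly in that case. A brief sketch of the expected results (hypothetical test, assuming the symbol is imported from this file):

    const std = @import("std");
    const __unorddf2 = @import("unorddf2.zig").__unorddf2;

    test "unordered comparison is nonzero iff a NaN is involved" {
        const nan = std.math.nan(f64);
        try std.testing.expect(__unorddf2(nan, 1.0) != 0);
        try std.testing.expect(__unorddf2(1.0, nan) != 0);
        try std.testing.expect(__unorddf2(1.0, 2.0) == 0);
    }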
diff --git a/lib/compiler_rt/unordsf2.zig b/lib/compiler_rt/unordsf2.zig
new file mode 100644
index 0000000000..78b388a75e
--- /dev/null
+++ b/lib/compiler_rt/unordsf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = common.linkage });
+ } else {
+ @export(__unordsf2, .{ .name = "__unordsf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __unordsf2(a: f32, b: f32) callconv(.C) i32 {
+ return comparef.unordcmp(f32, a, b);
+}
+
+fn __aeabi_fcmpun(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return comparef.unordcmp(f32, a, b);
+}
diff --git a/lib/compiler_rt/unordtf2.zig b/lib/compiler_rt/unordtf2.zig
new file mode 100644
index 0000000000..41d1d7008e
--- /dev/null
+++ b/lib/compiler_rt/unordtf2.zig
@@ -0,0 +1,23 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__unordkf2, .{ .name = "__unordkf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ // These exports are handled in cmptf2.zig because unordered comparisons
+ // are based on calling _Qp_cmp.
+ } else {
+ @export(__unordtf2, .{ .name = "__unordtf2", .linkage = common.linkage });
+ }
+}
+
+fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
+ return comparef.unordcmp(f128, a, b);
+}
+
+fn __unordkf2(a: f128, b: f128) callconv(.C) i32 {
+ return comparef.unordcmp(f128, a, b);
+}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 54d87faa7b..2646da2f6f 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -92,6 +92,9 @@ unwind_tables: bool,
test_evented_io: bool,
debug_compiler_runtime_libs: bool,
debug_compile_errors: bool,
+job_queued_compiler_rt_lib: bool = false,
+job_queued_compiler_rt_obj: bool = false,
+alloc_failure_occurred: bool = false,
c_source_files: []const CSourceFile,
clang_argv: []const []const u8,
@@ -129,11 +132,11 @@ libssp_static_lib: ?CRTFile = null,
/// Populated when we build the libc static library. A Job to build this is placed in the queue
/// and resolved before calling linker.flush().
libc_static_lib: ?CRTFile = null,
-/// Populated when we build the libcompiler_rt static library. A Job to build this is placed in the queue
-/// and resolved before calling linker.flush().
-compiler_rt_static_lib: ?CRTFile = null,
-/// Populated when we build the compiler_rt_obj object. A Job to build this is placed in the queue
-/// and resolved before calling linker.flush().
+/// Populated when we build the libcompiler_rt static library. A Job to build this is indicated
+/// by setting `job_queued_compiler_rt_lib` and resolved before calling linker.flush().
+compiler_rt_lib: ?CRTFile = null,
+/// Populated when we build the compiler_rt_obj object. A Job to build this is indicated
+/// by setting `job_queued_compiler_rt_obj` and resolved before calling linker.flush().
compiler_rt_obj: ?CRTFile = null,
glibc_so_files: ?glibc.BuiltSharedObjects = null,
@@ -175,7 +178,7 @@ pub const CRTFile = struct {
lock: Cache.Lock,
full_object_path: []const u8,
- fn deinit(self: *CRTFile, gpa: Allocator) void {
+ pub fn deinit(self: *CRTFile, gpa: Allocator) void {
self.lock.release();
gpa.free(self.full_object_path);
self.* = undefined;
@@ -223,8 +226,6 @@ const Job = union(enum) {
libcxxabi: void,
libtsan: void,
libssp: void,
- compiler_rt_lib: void,
- compiler_rt_obj: void,
/// needed when not linking libc and using LLVM for code generation because it generates
/// calls to, for example, memcpy and memset.
zig_libc: void,
@@ -1924,13 +1925,13 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
if (comp.bin_file.options.include_compiler_rt and capable_of_building_compiler_rt) {
if (is_exe_or_dyn_lib) {
log.debug("queuing a job to build compiler_rt_lib", .{});
- try comp.work_queue.writeItem(.{ .compiler_rt_lib = {} });
+ comp.job_queued_compiler_rt_lib = true;
} else if (options.output_mode != .Obj) {
log.debug("queuing a job to build compiler_rt_obj", .{});
// If build-obj with -fcompiler-rt is requested, that is handled specially
// elsewhere. In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
- try comp.work_queue.writeItem(.{ .compiler_rt_obj = {} });
+ comp.job_queued_compiler_rt_obj = true;
}
}
if (needs_c_symbols) {
@@ -1978,7 +1979,7 @@ pub fn destroy(self: *Compilation) void {
if (self.libcxxabi_static_lib) |*crt_file| {
crt_file.deinit(gpa);
}
- if (self.compiler_rt_static_lib) |*crt_file| {
+ if (self.compiler_rt_lib) |*crt_file| {
crt_file.deinit(gpa);
}
if (self.compiler_rt_obj) |*crt_file| {
@@ -2020,6 +2021,7 @@ pub fn destroy(self: *Compilation) void {
}
pub fn clearMiscFailures(comp: *Compilation) void {
+ comp.alloc_failure_occurred = false;
for (comp.misc_failures.values()) |*value| {
value.deinit(comp.gpa);
}
@@ -2532,8 +2534,10 @@ pub fn makeBinFileWritable(self: *Compilation) !void {
return self.bin_file.makeWritable();
}
+/// This function is temporally single-threaded.
pub fn totalErrorCount(self: *Compilation) usize {
- var total: usize = self.failed_c_objects.count() + self.misc_failures.count();
+ var total: usize = self.failed_c_objects.count() + self.misc_failures.count() +
+ @boolToInt(self.alloc_failure_occurred);
if (self.bin_file.options.module) |module| {
total += module.failed_exports.count();
@@ -2590,6 +2594,7 @@ pub fn totalErrorCount(self: *Compilation) usize {
return total;
}
+/// This function is temporally single-threaded.
pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
var arena = std.heap.ArenaAllocator.init(self.gpa);
errdefer arena.deinit();
@@ -2622,6 +2627,9 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
for (self.misc_failures.values()) |*value| {
try AllErrors.addPlainWithChildren(&arena, &errors, value.msg, value.children);
}
+ if (self.alloc_failure_occurred) {
+ try AllErrors.addPlain(&arena, &errors, "memory allocation failure");
+ }
if (self.bin_file.options.module) |module| {
{
var it = module.failed_files.iterator();
@@ -2739,6 +2747,8 @@ pub fn performAllTheWork(
comp.work_queue_wait_group.reset();
defer comp.work_queue_wait_group.wait();
+ const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
+
{
const astgen_frame = tracy.namedFrame("astgen");
defer astgen_frame.end();
@@ -2783,7 +2793,6 @@ pub fn performAllTheWork(
}
}
- const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
if (!use_stage1) {
const outdated_and_deleted_decls_frame = tracy.namedFrame("outdated_and_deleted_decls");
defer outdated_and_deleted_decls_frame.end();
@@ -2826,6 +2835,16 @@ pub fn performAllTheWork(
}
break;
}
+
+ if (comp.job_queued_compiler_rt_lib) {
+ comp.job_queued_compiler_rt_lib = false;
+ buildCompilerRtOneShot(comp, .Lib, &comp.compiler_rt_lib);
+ }
+
+ if (comp.job_queued_compiler_rt_obj) {
+ comp.job_queued_compiler_rt_obj = false;
+ buildCompilerRtOneShot(comp, .Obj, &comp.compiler_rt_obj);
+ }
}
fn processOneJob(comp: *Compilation, job: Job) !void {
@@ -2996,7 +3015,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
module.semaPkg(pkg) catch |err| switch (err) {
error.CurrentWorkingDirectoryUnlinked,
error.Unexpected,
- => try comp.setMiscFailure(
+ => comp.lockAndSetMiscFailure(
.analyze_pkg,
"unexpected problem analyzing package '{s}'",
.{pkg.root_src_path},
@@ -3011,7 +3030,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
glibc.buildCRTFile(comp, crt_file) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(.glibc_crt_file, "unable to build glibc CRT file: {s}", .{
+ comp.lockAndSetMiscFailure(.glibc_crt_file, "unable to build glibc CRT file: {s}", .{
@errorName(err),
});
};
@@ -3022,7 +3041,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
glibc.buildSharedObjects(comp) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.glibc_shared_objects,
"unable to build glibc shared objects: {s}",
.{@errorName(err)},
@@ -3035,7 +3054,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
musl.buildCRTFile(comp, crt_file) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.musl_crt_file,
"unable to build musl CRT file: {s}",
.{@errorName(err)},
@@ -3048,7 +3067,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
mingw.buildCRTFile(comp, crt_file) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.mingw_crt_file,
"unable to build mingw-w64 CRT file: {s}",
.{@errorName(err)},
@@ -3062,7 +3081,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const link_lib = comp.bin_file.options.system_libs.keys()[index];
mingw.buildImportLib(comp, link_lib) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.windows_import_lib,
"unable to generate DLL import .lib file: {s}",
.{@errorName(err)},
@@ -3075,7 +3094,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
libunwind.buildStaticLib(comp) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.libunwind,
"unable to build libunwind: {s}",
.{@errorName(err)},
@@ -3088,7 +3107,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
libcxx.buildLibCXX(comp) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.libcxx,
"unable to build libcxx: {s}",
.{@errorName(err)},
@@ -3101,7 +3120,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
libcxx.buildLibCXXABI(comp) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.libcxxabi,
"unable to build libcxxabi: {s}",
.{@errorName(err)},
@@ -3114,7 +3133,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
libtsan.buildTsan(comp) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.libtsan,
"unable to build TSAN library: {s}",
.{@errorName(err)},
@@ -3127,51 +3146,13 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
wasi_libc.buildCRTFile(comp, crt_file) catch |err| {
// TODO Surface more error details.
- try comp.setMiscFailure(
+ comp.lockAndSetMiscFailure(
.wasi_libc_crt_file,
"unable to build WASI libc CRT file: {s}",
.{@errorName(err)},
);
};
},
- .compiler_rt_lib => {
- const named_frame = tracy.namedFrame("compiler_rt_lib");
- defer named_frame.end();
-
- comp.buildOutputFromZig(
- "compiler_rt.zig",
- .Lib,
- &comp.compiler_rt_static_lib,
- .compiler_rt,
- ) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.SubCompilationFailed => return, // error reported already
- else => try comp.setMiscFailure(
- .compiler_rt,
- "unable to build compiler_rt: {s}",
- .{@errorName(err)},
- ),
- };
- },
- .compiler_rt_obj => {
- const named_frame = tracy.namedFrame("compiler_rt_obj");
- defer named_frame.end();
-
- comp.buildOutputFromZig(
- "compiler_rt.zig",
- .Obj,
- &comp.compiler_rt_obj,
- .compiler_rt,
- ) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.SubCompilationFailed => return, // error reported already
- else => try comp.setMiscFailure(
- .compiler_rt,
- "unable to build compiler_rt: {s}",
- .{@errorName(err)},
- ),
- };
- },
.libssp => {
const named_frame = tracy.namedFrame("libssp");
defer named_frame.end();
@@ -3184,7 +3165,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SubCompilationFailed => return, // error reported already
- else => try comp.setMiscFailure(
+ else => comp.lockAndSetMiscFailure(
.libssp,
"unable to build libssp: {s}",
.{@errorName(err)},
@@ -3203,7 +3184,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SubCompilationFailed => return, // error reported already
- else => try comp.setMiscFailure(
+ else => comp.lockAndSetMiscFailure(
.zig_libc,
"unable to build zig's multitarget libc: {s}",
.{@errorName(err)},
@@ -3307,11 +3288,7 @@ fn workerUpdateBuiltinZigFile(
comp.setMiscFailure(.write_builtin_zig, "unable to write builtin.zig to {s}: {s}", .{
dir_path, @errorName(err),
- }) catch |oom| switch (oom) {
- error.OutOfMemory => log.err("unable to write builtin.zig to {s}: {s}", .{
- dir_path, @errorName(err),
- }),
- };
+ });
};
}
@@ -3525,6 +3502,21 @@ fn workerUpdateCObject(
};
}
+fn buildCompilerRtOneShot(
+ comp: *Compilation,
+ output_mode: std.builtin.OutputMode,
+ out: *?CRTFile,
+) void {
+ comp.buildOutputFromZig("compiler_rt.zig", output_mode, out, .compiler_rt) catch |err| switch (err) {
+ error.SubCompilationFailed => return, // error reported already
+ else => comp.lockAndSetMiscFailure(
+ .compiler_rt,
+ "unable to build compiler_rt: {s}",
+ .{@errorName(err)},
+ ),
+ };
+}
+
fn reportRetryableCObjectError(
comp: *Compilation,
c_object: *CObject,
@@ -4623,14 +4615,21 @@ fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
comp.bin_file.options.object_format != .c;
}
-fn setMiscFailure(
+fn setAllocFailure(comp: *Compilation) void {
+ log.debug("memory allocation failure", .{});
+ comp.alloc_failure_occurred = true;
+}
+
+/// Assumes that Compilation mutex is locked.
+/// See also `lockAndSetMiscFailure`.
+pub fn setMiscFailure(
comp: *Compilation,
tag: MiscTask,
comptime format: []const u8,
args: anytype,
-) Allocator.Error!void {
- try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
- const msg = try std.fmt.allocPrint(comp.gpa, format, args);
+) void {
+ comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1) catch return comp.setAllocFailure();
+ const msg = std.fmt.allocPrint(comp.gpa, format, args) catch return comp.setAllocFailure();
const gop = comp.misc_failures.getOrPutAssumeCapacity(tag);
if (gop.found_existing) {
gop.value_ptr.deinit(comp.gpa);
@@ -4638,6 +4637,19 @@ fn setMiscFailure(
gop.value_ptr.* = .{ .msg = msg };
}
+/// See also `setMiscFailure`.
+pub fn lockAndSetMiscFailure(
+ comp: *Compilation,
+ tag: MiscTask,
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+
+ return setMiscFailure(comp, tag, format, args);
+}
+
pub fn dump_argv(argv: []const []const u8) void {
for (argv[0 .. argv.len - 1]) |arg| {
std.debug.print("{s} ", .{arg});
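
The Compilation changes replace the compiler_rt work-queue jobs with one-shot flags drained at the end of performAllTheWork, and make setMiscFailure infallible: allocation failures are folded into alloc_failure_occurred instead of being propagated, and lockAndSetMiscFailure wraps the same logic with the mutex for callers off the main thread. A self-contained analogue of that failure-recording convention (the Comp type below is a stand-in, not the real Compilation):

    const std = @import("std");

    // Stand-in mirroring the lockAndSetMiscFailure pattern: workers record
    // failures (including OOM) under a mutex instead of returning errors.
    const Comp = struct {
        mutex: std.Thread.Mutex = .{},
        alloc_failure_occurred: bool = false,
        misc_failure: ?[]const u8 = null,

        fn setMiscFailure(comp: *Comp, gpa: std.mem.Allocator, comptime fmt: []const u8, args: anytype) void {
            const msg = std.fmt.allocPrint(gpa, fmt, args) catch {
                comp.alloc_failure_occurred = true; // analogous to setAllocFailure
                return;
            };
            comp.misc_failure = msg;
        }

        fn lockAndSetMiscFailure(comp: *Comp, gpa: std.mem.Allocator, comptime fmt: []const u8, args: anytype) void {
            comp.mutex.lock();
            defer comp.mutex.unlock();
            comp.setMiscFailure(gpa, fmt, args);
        }
    };

    test "failures are recorded rather than propagated" {
        var comp: Comp = .{};
        comp.lockAndSetMiscFailure(std.testing.allocator, "unable to build {s}", .{"compiler_rt"});
        defer if (comp.misc_failure) |m| std.testing.allocator.free(m);
        try std.testing.expect(comp.misc_failure != null);
        try std.testing.expect(!comp.alloc_failure_occurred);
    }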
diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig
index 55e40ea287..7115adbddd 100644
--- a/src/ThreadPool.zig
+++ b/src/ThreadPool.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const ThreadPool = @This();
+const WaitGroup = @import("WaitGroup.zig");
mutex: std.Thread.Mutex = .{},
cond: std.Thread.Condition = .{},
@@ -19,8 +20,8 @@ const RunProto = switch (builtin.zig_backend) {
else => *const fn (*Runnable) void,
};
-pub fn init(self: *ThreadPool, allocator: std.mem.Allocator) !void {
- self.* = .{
+pub fn init(pool: *ThreadPool, allocator: std.mem.Allocator) !void {
+ pool.* = .{
.allocator = allocator,
.threads = &[_]std.Thread{},
};
@@ -30,48 +31,48 @@ pub fn init(self: *ThreadPool, allocator: std.mem.Allocator) !void {
}
const thread_count = std.math.max(1, std.Thread.getCpuCount() catch 1);
- self.threads = try allocator.alloc(std.Thread, thread_count);
- errdefer allocator.free(self.threads);
+ pool.threads = try allocator.alloc(std.Thread, thread_count);
+ errdefer allocator.free(pool.threads);
// kill and join any threads we spawned previously on error.
var spawned: usize = 0;
- errdefer self.join(spawned);
+ errdefer pool.join(spawned);
- for (self.threads) |*thread| {
- thread.* = try std.Thread.spawn(.{}, worker, .{self});
+ for (pool.threads) |*thread| {
+ thread.* = try std.Thread.spawn(.{}, worker, .{pool});
spawned += 1;
}
}
-pub fn deinit(self: *ThreadPool) void {
- self.join(self.threads.len); // kill and join all threads.
- self.* = undefined;
+pub fn deinit(pool: *ThreadPool) void {
+ pool.join(pool.threads.len); // kill and join all threads.
+ pool.* = undefined;
}
-fn join(self: *ThreadPool, spawned: usize) void {
+fn join(pool: *ThreadPool, spawned: usize) void {
if (builtin.single_threaded) {
return;
}
{
- self.mutex.lock();
- defer self.mutex.unlock();
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
// ensure future worker threads exit the dequeue loop
- self.is_running = false;
+ pool.is_running = false;
}
// wake up any sleeping threads (this can be done outside the mutex)
// then wait for all the threads we know are spawned to complete.
- self.cond.broadcast();
- for (self.threads[0..spawned]) |thread| {
+ pool.cond.broadcast();
+ for (pool.threads[0..spawned]) |thread| {
thread.join();
}
- self.allocator.free(self.threads);
+ pool.allocator.free(pool.threads);
}
-pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
+pub fn spawn(pool: *ThreadPool, comptime func: anytype, args: anytype) !void {
if (builtin.single_threaded) {
@call(.{}, func, args);
return;
@@ -98,41 +99,57 @@ pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
};
{
- self.mutex.lock();
- defer self.mutex.unlock();
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
- const closure = try self.allocator.create(Closure);
+ const closure = try pool.allocator.create(Closure);
closure.* = .{
.arguments = args,
- .pool = self,
+ .pool = pool,
};
- self.run_queue.prepend(&closure.run_node);
+ pool.run_queue.prepend(&closure.run_node);
}
// Notify waiting threads outside the lock to try and keep the critical section small.
- self.cond.signal();
+ pool.cond.signal();
}
-fn worker(self: *ThreadPool) void {
- self.mutex.lock();
- defer self.mutex.unlock();
+fn worker(pool: *ThreadPool) void {
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
while (true) {
- while (self.run_queue.popFirst()) |run_node| {
+ while (pool.run_queue.popFirst()) |run_node| {
// Temporarily unlock the mutex in order to execute the run_node
- self.mutex.unlock();
- defer self.mutex.lock();
+ pool.mutex.unlock();
+ defer pool.mutex.lock();
const runFn = run_node.data.runFn;
runFn(&run_node.data);
}
// Stop executing instead of waiting if the thread pool is no longer running.
- if (self.is_running) {
- self.cond.wait(&self.mutex);
+ if (pool.is_running) {
+ pool.cond.wait(&pool.mutex);
} else {
break;
}
}
}
+
+pub fn waitAndWork(pool: *ThreadPool, wait_group: *WaitGroup) void {
+ while (!wait_group.isDone()) {
+ if (blk: {
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
+ break :blk pool.run_queue.popFirst();
+ }) |run_node| {
+ run_node.data.runFn(&run_node.data);
+ continue;
+ }
+
+ wait_group.wait();
+ return;
+ }
+}
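
waitAndWork lets the thread that would otherwise block on the wait group keep popping and running queued jobs until everything it is waiting for has finished; performAllTheWork uses it so the main thread helps drain its own queue. A usage sketch (hypothetical test; the WaitGroup default init and its start/finish helpers are assumptions about the existing API):

    const std = @import("std");
    const ThreadPool = @import("ThreadPool.zig");
    const WaitGroup = @import("WaitGroup.zig");

    fn bump(wg: *WaitGroup, counter: *std.atomic.Atomic(u32)) void {
        defer wg.finish();
        _ = counter.fetchAdd(1, .Monotonic);
    }

    test "waitAndWork drains queued jobs on the waiting thread" {
        var pool: ThreadPool = undefined;
        try pool.init(std.testing.allocator);
        defer pool.deinit();

        var wg: WaitGroup = .{};
        var counter = std.atomic.Atomic(u32).init(0);

        var i: usize = 0;
        while (i < 8) : (i += 1) {
            wg.start();
            try pool.spawn(bump, .{ &wg, &counter });
        }

        // Rather than sleeping, the caller runs pending jobs until the group is done.
        pool.waitAndWork(&wg);
        try std.testing.expectEqual(@as(u32, 8), counter.load(.Monotonic));
    }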
diff --git a/src/WaitGroup.zig b/src/WaitGroup.zig
index 860d0a8b4c..c8be6658db 100644
--- a/src/WaitGroup.zig
+++ b/src/WaitGroup.zig
@@ -37,3 +37,10 @@ pub fn reset(self: *WaitGroup) void {
self.state.store(0, .Monotonic);
self.event.reset();
}
+
+pub fn isDone(wg: *WaitGroup) bool {
+ const state = wg.state.load(.Acquire);
+ assert(state & is_waiting == 0);
+
+ return (state / one_pending) == 0;
+}
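
isDone decodes the packed counter: the low bit marks a parked waiter and the remaining bits count pending tasks in units of one_pending, so a zero count with no waiter means all work has completed. A self-contained sketch of that decoding (the bit layout shown mirrors the constants assumed to live in WaitGroup.zig):

    const std = @import("std");

    const is_waiting: usize = 1 << 0; // assumed: low bit flags a parked waiter
    const one_pending: usize = 1 << 1; // assumed: higher bits count pending tasks

    fn isDoneState(state: usize) bool {
        std.debug.assert(state & is_waiting == 0);
        return (state / one_pending) == 0;
    }

    test "packed wait-group state decoding" {
        try std.testing.expect(isDoneState(0));
        try std.testing.expect(!isDoneState(3 * one_pending));
    }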
diff --git a/src/link.zig b/src/link.zig
index 51712db40e..65e9de8ca3 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -792,11 +792,8 @@ pub const File = struct {
}),
}
}
- if (base.options.object_format == .macho) {
- try base.cast(MachO).?.flushObject(comp, prog_node);
- } else {
- try base.flushModule(comp, prog_node);
- }
+ try base.flushModule(comp, prog_node);
+
const dirname = fs.path.dirname(full_out_path_z) orelse ".";
break :blk try fs.path.join(arena, &.{ dirname, base.intermediary_basename.? });
} else null;
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 37ec8d0758..2943cae36a 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1354,7 +1354,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !
}
 // MSVC compiler_rt is missing some stuff, so we build it unconditionally but
 // rely on weak linkage to allow MSVC compiler_rt functions to override ours.
- if (comp.compiler_rt_static_lib) |lib| {
+ if (comp.compiler_rt_lib) |lib| {
try argv.append(lib.full_object_path);
}
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index e0f114acd4..a0e40e5682 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1272,7 +1272,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
const stack_size = self.base.options.stack_size_override orelse 16777216;
const allow_shlib_undefined = self.base.options.allow_shlib_undefined orelse !self.base.options.is_native_os;
const compiler_rt_path: ?[]const u8 = blk: {
- if (comp.compiler_rt_static_lib) |x| break :blk x.full_object_path;
+ if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
break :blk null;
};
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index a4d14b985f..c71007157a 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -436,7 +436,7 @@ pub fn flush(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node) !v
return error.TODOImplementWritingStaticLibFiles;
}
}
- try self.flushModule(comp, prog_node);
+ return self.flushModule(comp, prog_node);
}
pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node) !void {
@@ -444,8 +444,23 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
defer tracy.end();
const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
- if (!use_stage1 and self.base.options.output_mode == .Obj)
- return self.flushObject(comp, prog_node);
+
+ if (build_options.have_llvm and !use_stage1) {
+ if (self.llvm_object) |llvm_object| {
+ try llvm_object.flushModule(comp, prog_node);
+
+ llvm_object.destroy(self.base.allocator);
+ self.llvm_object = null;
+
+ if (self.base.options.output_mode == .Lib and self.base.options.link_mode == .Static) {
+ return;
+ }
+ }
+ }
+
+ var sub_prog_node = prog_node.start("MachO Flush", 0);
+ sub_prog_node.activate();
+ defer sub_prog_node.end();
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
@@ -454,12 +469,6 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
- if (self.d_sym) |*d_sym| {
- if (self.base.options.module) |module| {
- try d_sym.dwarf.flushModule(&self.base, module);
- }
- }
-
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
@@ -482,8 +491,6 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const obj_basename = self.base.intermediary_basename orelse break :blk null;
- try self.flushObject(comp, prog_node);
-
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, obj_basename });
} else {
@@ -491,9 +498,11 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
}
} else null;
- var sub_prog_node = prog_node.start("MachO Flush", 0);
- sub_prog_node.activate();
- defer sub_prog_node.end();
+ if (self.d_sym) |*d_sym| {
+ if (self.base.options.module) |module| {
+ try d_sym.dwarf.flushModule(&self.base, module);
+ }
+ }
const is_lib = self.base.options.output_mode == .Lib;
const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
@@ -738,7 +747,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try positionals.append(p);
}
- if (comp.compiler_rt_static_lib) |lib| {
+ if (comp.compiler_rt_lib) |lib| {
try positionals.append(lib.full_object_path);
}
@@ -1119,17 +1128,6 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
self.cold_start = false;
}
-pub fn flushObject(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
- if (build_options.have_llvm)
- if (self.llvm_object) |llvm_object|
- return llvm_object.flushModule(comp, prog_node);
-
- return error.TODOImplementWritingObjFiles;
-}
-
fn resolveSearchDir(
arena: Allocator,
dir: []const u8,
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index ee2ed19ed5..5a910e188b 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2255,7 +2255,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
const is_obj = self.base.options.output_mode == .Obj;
const compiler_rt_path: ?[]const u8 = if (self.base.options.include_compiler_rt and !is_obj)
- comp.compiler_rt_static_lib.?.full_object_path
+ comp.compiler_rt_lib.?.full_object_path
else
null;
diff --git a/src/musl.zig b/src/musl.zig
index d061addc9a..68b524b415 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const path = std.fs.path;
const assert = std.debug.assert;
-const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");