aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitattributes4
-rw-r--r--CMakeLists.txt18
-rw-r--r--LICENSE2
-rw-r--r--build.zig56
-rw-r--r--ci/aarch64-linux-debug.sh2
-rw-r--r--ci/aarch64-linux-release.sh4
-rwxr-xr-xci/aarch64-macos.sh18
-rwxr-xr-xci/x86_64-linux-debug.sh2
-rwxr-xr-xci/x86_64-linux-release.sh4
-rwxr-xr-xci/x86_64-macos.sh18
-rw-r--r--doc/langref.html.in307
-rw-r--r--lib/compiler_rt.zig1
-rw-r--r--lib/compiler_rt/absvdi2.zig2
-rw-r--r--lib/compiler_rt/absvsi2.zig2
-rw-r--r--lib/compiler_rt/absvti2.zig2
-rw-r--r--lib/compiler_rt/adddf3.zig4
-rw-r--r--lib/compiler_rt/addhf3.zig2
-rw-r--r--lib/compiler_rt/addo.zig8
-rw-r--r--lib/compiler_rt/addsf3.zig4
-rw-r--r--lib/compiler_rt/addtf3.zig6
-rw-r--r--lib/compiler_rt/addxf3.zig2
-rw-r--r--lib/compiler_rt/arm.zig40
-rw-r--r--lib/compiler_rt/atomics.zig114
-rw-r--r--lib/compiler_rt/aulldiv.zig4
-rw-r--r--lib/compiler_rt/aullrem.zig4
-rw-r--r--lib/compiler_rt/bcmp.zig2
-rw-r--r--lib/compiler_rt/bswap.zig6
-rw-r--r--lib/compiler_rt/ceil.zig14
-rw-r--r--lib/compiler_rt/clzsi2_test.zig26
-rw-r--r--lib/compiler_rt/cmp.zig12
-rw-r--r--lib/compiler_rt/cmpdf2.zig16
-rw-r--r--lib/compiler_rt/cmphf2.zig10
-rw-r--r--lib/compiler_rt/cmpsf2.zig16
-rw-r--r--lib/compiler_rt/cmptf2.zig32
-rw-r--r--lib/compiler_rt/cmpxf2.zig10
-rw-r--r--lib/compiler_rt/common.zig4
-rw-r--r--lib/compiler_rt/cos.zig14
-rw-r--r--lib/compiler_rt/count0bits.zig18
-rw-r--r--lib/compiler_rt/divdc3.zig2
-rw-r--r--lib/compiler_rt/divdf3.zig4
-rw-r--r--lib/compiler_rt/divhc3.zig2
-rw-r--r--lib/compiler_rt/divhf3.zig2
-rw-r--r--lib/compiler_rt/divsc3.zig2
-rw-r--r--lib/compiler_rt/divsf3.zig4
-rw-r--r--lib/compiler_rt/divtc3.zig2
-rw-r--r--lib/compiler_rt/divtf3.zig6
-rw-r--r--lib/compiler_rt/divti3.zig21
-rw-r--r--lib/compiler_rt/divxc3.zig2
-rw-r--r--lib/compiler_rt/divxf3.zig2
-rw-r--r--lib/compiler_rt/emutls.zig4
-rw-r--r--lib/compiler_rt/exp.zig14
-rw-r--r--lib/compiler_rt/exp2.zig14
-rw-r--r--lib/compiler_rt/extenddftf2.zig6
-rw-r--r--lib/compiler_rt/extenddfxf2.zig2
-rw-r--r--lib/compiler_rt/extendhfdf2.zig2
-rw-r--r--lib/compiler_rt/extendhfsf2.zig6
-rw-r--r--lib/compiler_rt/extendhftf2.zig2
-rw-r--r--lib/compiler_rt/extendhfxf2.zig2
-rw-r--r--lib/compiler_rt/extendsfdf2.zig4
-rw-r--r--lib/compiler_rt/extendsftf2.zig6
-rw-r--r--lib/compiler_rt/extendsfxf2.zig2
-rw-r--r--lib/compiler_rt/extendxftf2.zig2
-rw-r--r--lib/compiler_rt/fabs.zig14
-rw-r--r--lib/compiler_rt/fixdfdi.zig4
-rw-r--r--lib/compiler_rt/fixdfsi.zig4
-rw-r--r--lib/compiler_rt/fixdfti.zig4
-rw-r--r--lib/compiler_rt/fixhfdi.zig2
-rw-r--r--lib/compiler_rt/fixhfsi.zig2
-rw-r--r--lib/compiler_rt/fixhfti.zig4
-rw-r--r--lib/compiler_rt/fixsfdi.zig4
-rw-r--r--lib/compiler_rt/fixsfsi.zig4
-rw-r--r--lib/compiler_rt/fixsfti.zig4
-rw-r--r--lib/compiler_rt/fixtfdi.zig6
-rw-r--r--lib/compiler_rt/fixtfsi.zig6
-rw-r--r--lib/compiler_rt/fixtfti.zig4
-rw-r--r--lib/compiler_rt/fixunsdfdi.zig4
-rw-r--r--lib/compiler_rt/fixunsdfsi.zig4
-rw-r--r--lib/compiler_rt/fixunsdfti.zig4
-rw-r--r--lib/compiler_rt/fixunshfdi.zig2
-rw-r--r--lib/compiler_rt/fixunshfsi.zig2
-rw-r--r--lib/compiler_rt/fixunshfti.zig6
-rw-r--r--lib/compiler_rt/fixunssfdi.zig4
-rw-r--r--lib/compiler_rt/fixunssfsi.zig4
-rw-r--r--lib/compiler_rt/fixunssfti.zig4
-rw-r--r--lib/compiler_rt/fixunstfdi.zig6
-rw-r--r--lib/compiler_rt/fixunstfsi.zig6
-rw-r--r--lib/compiler_rt/fixunstfti.zig4
-rw-r--r--lib/compiler_rt/fixunsxfdi.zig2
-rw-r--r--lib/compiler_rt/fixunsxfsi.zig2
-rw-r--r--lib/compiler_rt/fixunsxfti.zig4
-rw-r--r--lib/compiler_rt/fixxfdi.zig2
-rw-r--r--lib/compiler_rt/fixxfsi.zig2
-rw-r--r--lib/compiler_rt/fixxfti.zig4
-rw-r--r--lib/compiler_rt/floatdidf.zig4
-rw-r--r--lib/compiler_rt/floatdihf.zig2
-rw-r--r--lib/compiler_rt/floatdisf.zig4
-rw-r--r--lib/compiler_rt/floatditf.zig6
-rw-r--r--lib/compiler_rt/floatdixf.zig2
-rw-r--r--lib/compiler_rt/floatsidf.zig4
-rw-r--r--lib/compiler_rt/floatsihf.zig2
-rw-r--r--lib/compiler_rt/floatsisf.zig4
-rw-r--r--lib/compiler_rt/floatsitf.zig6
-rw-r--r--lib/compiler_rt/floatsixf.zig2
-rw-r--r--lib/compiler_rt/floattidf.zig4
-rw-r--r--lib/compiler_rt/floattihf.zig4
-rw-r--r--lib/compiler_rt/floattisf.zig4
-rw-r--r--lib/compiler_rt/floattitf.zig4
-rw-r--r--lib/compiler_rt/floattixf.zig4
-rw-r--r--lib/compiler_rt/floatundidf.zig4
-rw-r--r--lib/compiler_rt/floatundihf.zig2
-rw-r--r--lib/compiler_rt/floatundisf.zig4
-rw-r--r--lib/compiler_rt/floatunditf.zig6
-rw-r--r--lib/compiler_rt/floatundixf.zig2
-rw-r--r--lib/compiler_rt/floatunsidf.zig4
-rw-r--r--lib/compiler_rt/floatunsihf.zig2
-rw-r--r--lib/compiler_rt/floatunsisf.zig4
-rw-r--r--lib/compiler_rt/floatunsitf.zig6
-rw-r--r--lib/compiler_rt/floatunsixf.zig2
-rw-r--r--lib/compiler_rt/floatuntidf.zig4
-rw-r--r--lib/compiler_rt/floatuntihf.zig4
-rw-r--r--lib/compiler_rt/floatuntisf.zig4
-rw-r--r--lib/compiler_rt/floatuntitf.zig6
-rw-r--r--lib/compiler_rt/floatuntixf.zig4
-rw-r--r--lib/compiler_rt/floor.zig14
-rw-r--r--lib/compiler_rt/fma.zig14
-rw-r--r--lib/compiler_rt/fmax.zig14
-rw-r--r--lib/compiler_rt/fmin.zig14
-rw-r--r--lib/compiler_rt/fmod.zig14
-rw-r--r--lib/compiler_rt/gedf2.zig8
-rw-r--r--lib/compiler_rt/gehf2.zig4
-rw-r--r--lib/compiler_rt/gesf2.zig8
-rw-r--r--lib/compiler_rt/getf2.zig8
-rw-r--r--lib/compiler_rt/gexf2.zig4
-rw-r--r--lib/compiler_rt/int.zig30
-rw-r--r--lib/compiler_rt/int_to_float_test.zig3
-rw-r--r--lib/compiler_rt/log.zig14
-rw-r--r--lib/compiler_rt/log10.zig14
-rw-r--r--lib/compiler_rt/log2.zig14
-rw-r--r--lib/compiler_rt/memcmp.zig2
-rw-r--r--lib/compiler_rt/memcpy.zig2
-rw-r--r--lib/compiler_rt/memmove.zig2
-rw-r--r--lib/compiler_rt/memset.zig4
-rw-r--r--lib/compiler_rt/modti3.zig4
-rw-r--r--lib/compiler_rt/muldc3.zig2
-rw-r--r--lib/compiler_rt/muldf3.zig4
-rw-r--r--lib/compiler_rt/muldi3.zig4
-rw-r--r--lib/compiler_rt/mulhc3.zig2
-rw-r--r--lib/compiler_rt/mulhf3.zig2
-rw-r--r--lib/compiler_rt/mulo.zig15
-rw-r--r--lib/compiler_rt/mulsc3.zig2
-rw-r--r--lib/compiler_rt/mulsf3.zig4
-rw-r--r--lib/compiler_rt/multc3.zig2
-rw-r--r--lib/compiler_rt/multf3.zig6
-rw-r--r--lib/compiler_rt/multi3.zig4
-rw-r--r--lib/compiler_rt/mulxc3.zig2
-rw-r--r--lib/compiler_rt/mulxf3.zig2
-rw-r--r--lib/compiler_rt/negXi2.zig6
-rw-r--r--lib/compiler_rt/negdf2.zig4
-rw-r--r--lib/compiler_rt/neghf2.zig2
-rw-r--r--lib/compiler_rt/negsf2.zig4
-rw-r--r--lib/compiler_rt/negtf2.zig2
-rw-r--r--lib/compiler_rt/negv.zig6
-rw-r--r--lib/compiler_rt/negxf2.zig2
-rw-r--r--lib/compiler_rt/parity.zig6
-rw-r--r--lib/compiler_rt/popcount.zig6
-rw-r--r--lib/compiler_rt/round.zig14
-rw-r--r--lib/compiler_rt/shift.zig18
-rw-r--r--lib/compiler_rt/sin.zig14
-rw-r--r--lib/compiler_rt/sincos.zig14
-rw-r--r--lib/compiler_rt/sqrt.zig14
-rw-r--r--lib/compiler_rt/subdf3.zig4
-rw-r--r--lib/compiler_rt/subhf3.zig2
-rw-r--r--lib/compiler_rt/subo.zig6
-rw-r--r--lib/compiler_rt/subsf3.zig4
-rw-r--r--lib/compiler_rt/subtf3.zig6
-rw-r--r--lib/compiler_rt/subxf3.zig2
-rw-r--r--lib/compiler_rt/tan.zig14
-rw-r--r--lib/compiler_rt/trunc.zig14
-rw-r--r--lib/compiler_rt/truncdfhf2.zig4
-rw-r--r--lib/compiler_rt/truncdfsf2.zig4
-rw-r--r--lib/compiler_rt/truncsfhf2.zig6
-rw-r--r--lib/compiler_rt/trunctfdf2.zig6
-rw-r--r--lib/compiler_rt/trunctfhf2.zig2
-rw-r--r--lib/compiler_rt/trunctfsf2.zig6
-rw-r--r--lib/compiler_rt/trunctfxf2.zig16
-rw-r--r--lib/compiler_rt/truncxfdf2.zig2
-rw-r--r--lib/compiler_rt/truncxfhf2.zig2
-rw-r--r--lib/compiler_rt/truncxfsf2.zig2
-rw-r--r--lib/compiler_rt/udivmodei4.zig145
-rw-r--r--lib/compiler_rt/udivmodti4.zig4
-rw-r--r--lib/compiler_rt/udivti3.zig4
-rw-r--r--lib/compiler_rt/umodti3.zig4
-rw-r--r--lib/compiler_rt/unorddf2.zig4
-rw-r--r--lib/compiler_rt/unordhf2.zig2
-rw-r--r--lib/compiler_rt/unordsf2.zig4
-rw-r--r--lib/compiler_rt/unordtf2.zig4
-rw-r--r--lib/compiler_rt/unordxf2.zig2
-rw-r--r--lib/docs/index.html2
-rw-r--r--lib/libc/mingw/stdio/_vscprintf.c86
-rw-r--r--lib/libcxx/include/__config11
-rw-r--r--lib/libcxx/include/__locale6
-rw-r--r--lib/libcxx/include/locale2
-rw-r--r--lib/libcxx/src/locale.cpp2
-rw-r--r--lib/std/Url.zig98
-rw-r--r--lib/std/array_hash_map.zig8
-rw-r--r--lib/std/array_list.zig45
-rw-r--r--lib/std/build/CheckObjectStep.zig122
-rw-r--r--lib/std/build/LibExeObjStep.zig8
-rw-r--r--lib/std/builtin.zig100
-rw-r--r--lib/std/c.zig4
-rw-r--r--lib/std/c/darwin.zig11
-rw-r--r--lib/std/c/dragonfly.zig16
-rw-r--r--lib/std/c/freebsd.zig15
-rw-r--r--lib/std/c/haiku.zig12
-rw-r--r--lib/std/c/linux.zig4
-rw-r--r--lib/std/c/netbsd.zig21
-rw-r--r--lib/std/c/openbsd.zig13
-rw-r--r--lib/std/c/solaris.zig11
-rw-r--r--lib/std/child_process.zig430
-rw-r--r--lib/std/compress/deflate/bits_utils.zig3
-rw-r--r--lib/std/compress/deflate/compressor.zig4
-rw-r--r--lib/std/compress/deflate/compressor_test.zig58
-rw-r--r--lib/std/compress/deflate/decompressor.zig7
-rw-r--r--lib/std/compress/deflate/deflate_fast.zig41
-rw-r--r--lib/std/compress/deflate/deflate_fast_test.zig8
-rw-r--r--lib/std/compress/deflate/dict_decoder.zig3
-rw-r--r--lib/std/compress/deflate/huffman_bit_writer.zig93
-rw-r--r--lib/std/compress/deflate/huffman_code.zig66
-rw-r--r--lib/std/compress/deflate/token.zig3
-rw-r--r--lib/std/compress/gzip.zig6
-rw-r--r--lib/std/compress/testdata/rfc1951.txt (renamed from lib/std/compress/rfc1951.txt)0
-rw-r--r--lib/std/compress/testdata/rfc1951.txt.fixed.z.9 (renamed from lib/std/compress/rfc1951.txt.fixed.z.9)bin12836 -> 12836 bytes
-rw-r--r--lib/std/compress/testdata/rfc1951.txt.z.0 (renamed from lib/std/compress/rfc1951.txt.z.0)bin36960 -> 36960 bytes
-rw-r--r--lib/std/compress/testdata/rfc1951.txt.z.9 (renamed from lib/std/compress/rfc1951.txt.z.9)bin11111 -> 11111 bytes
-rw-r--r--lib/std/compress/testdata/rfc1952.txt (renamed from lib/std/compress/rfc1952.txt)0
-rw-r--r--lib/std/compress/testdata/rfc1952.txt.gz (renamed from lib/std/compress/rfc1952.txt.gz)bin8059 -> 8059 bytes
-rw-r--r--lib/std/compress/zlib.zig10
-rw-r--r--lib/std/crypto.zig5
-rw-r--r--lib/std/crypto/Certificate.zig1115
-rw-r--r--lib/std/crypto/Certificate/Bundle.zig189
-rw-r--r--lib/std/crypto/aegis.zig4
-rw-r--r--lib/std/crypto/aes_gcm.zig2
-rw-r--r--lib/std/crypto/hkdf.zig48
-rw-r--r--lib/std/crypto/pcurves/p256/p256_64.zig16
-rw-r--r--lib/std/crypto/pcurves/p256/p256_scalar_64.zig16
-rw-r--r--lib/std/crypto/pcurves/p384/p384_64.zig16
-rw-r--r--lib/std/crypto/pcurves/p384/p384_scalar_64.zig16
-rw-r--r--lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig16
-rw-r--r--lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig16
-rw-r--r--lib/std/crypto/phc_encoding.zig8
-rw-r--r--lib/std/crypto/salsa20.zig4
-rw-r--r--lib/std/crypto/sha2.zig22
-rw-r--r--lib/std/crypto/sha3.zig22
-rw-r--r--lib/std/crypto/tls.zig494
-rw-r--r--lib/std/crypto/tls/Client.zig1308
-rw-r--r--lib/std/crypto/utils.zig24
-rw-r--r--lib/std/debug.zig55
-rw-r--r--lib/std/enums.zig22
-rw-r--r--lib/std/fmt.zig20
-rw-r--r--lib/std/fmt/parse_float.zig5
-rw-r--r--lib/std/fs.zig9
-rw-r--r--lib/std/fs/path.zig4
-rw-r--r--lib/std/fs/test.zig4
-rw-r--r--lib/std/hash/auto_hash.zig6
-rw-r--r--lib/std/hash_map.zig20
-rw-r--r--lib/std/heap.zig12
-rw-r--r--lib/std/http.zig301
-rw-r--r--lib/std/http/Client.zig181
-rw-r--r--lib/std/http/method.zig65
-rw-r--r--lib/std/http/status.zig182
-rw-r--r--lib/std/io/buffered_reader.zig121
-rw-r--r--lib/std/io/fixed_buffer_stream.zig11
-rw-r--r--lib/std/io/multi_writer.zig2
-rw-r--r--lib/std/json.zig26
-rw-r--r--lib/std/leb128.zig26
-rw-r--r--lib/std/log.zig42
-rw-r--r--lib/std/macho.zig277
-rw-r--r--lib/std/math.zig99
-rw-r--r--lib/std/math/big/int.zig234
-rw-r--r--lib/std/math/powi.zig18
-rw-r--r--lib/std/math/sqrt.zig5
-rw-r--r--lib/std/mem.zig63
-rw-r--r--lib/std/meta.zig90
-rw-r--r--lib/std/meta/trailer_flags.zig20
-rw-r--r--lib/std/meta/trait.zig4
-rw-r--r--lib/std/multi_array_list.zig26
-rw-r--r--lib/std/net.zig77
-rw-r--r--lib/std/os.zig108
-rw-r--r--lib/std/os/linux.zig55
-rw-r--r--lib/std/os/linux/seccomp.zig18
-rw-r--r--lib/std/os/test.zig9
-rw-r--r--lib/std/os/uefi/protocols/device_path_protocol.zig4
-rw-r--r--lib/std/os/windows.zig93
-rw-r--r--lib/std/os/windows/kernel32.zig2
-rw-r--r--lib/std/os/windows/ws2_32.zig19
-rw-r--r--lib/std/packed_int_array.zig6
-rw-r--r--lib/std/process.zig26
-rw-r--r--lib/std/rand/Xoroshiro128.zig4
-rw-r--r--lib/std/rand/Xoshiro256.zig4
-rw-r--r--lib/std/simd.zig4
-rw-r--r--lib/std/start.zig2
-rw-r--r--lib/std/std.zig5
-rw-r--r--lib/std/target.zig10
-rw-r--r--lib/std/testing.zig4
-rw-r--r--lib/std/wasm.zig4
-rw-r--r--lib/std/x.zig19
-rw-r--r--lib/std/x/net/bpf.zig1003
-rw-r--r--lib/std/x/net/ip.zig57
-rw-r--r--lib/std/x/net/tcp.zig447
-rw-r--r--lib/std/x/os/io.zig224
-rw-r--r--lib/std/x/os/net.zig605
-rw-r--r--lib/std/x/os/socket.zig320
-rw-r--r--lib/std/x/os/socket_posix.zig275
-rw-r--r--lib/std/x/os/socket_windows.zig458
-rw-r--r--lib/std/zig/Ast.zig22
-rw-r--r--lib/std/zig/c_builtins.zig4
-rw-r--r--lib/std/zig/c_translation.zig2
-rw-r--r--lib/std/zig/number_literal.zig12
-rw-r--r--lib/std/zig/parse.zig113
-rw-r--r--lib/std/zig/parser_test.zig73
-rw-r--r--lib/std/zig/render.zig10
-rw-r--r--lib/std/zig/system/x86.zig35
-rw-r--r--lib/zig.h738
-rw-r--r--src/Air.zig38
-rw-r--r--src/AstGen.zig97
-rw-r--r--src/Autodoc.zig50
-rw-r--r--src/BuiltinFn.zig36
-rw-r--r--src/Compilation.zig96
-rw-r--r--src/InternPool.zig4
-rw-r--r--src/Liveness.zig25
-rw-r--r--src/Module.zig33
-rw-r--r--src/Sema.zig1463
-rw-r--r--src/TypedValue.zig4
-rw-r--r--src/Zir.zig41
-rw-r--r--src/arch/aarch64/CodeGen.zig229
-rw-r--r--src/arch/aarch64/Mir.zig2
-rw-r--r--src/arch/aarch64/abi.zig23
-rw-r--r--src/arch/aarch64/bits.zig4
-rw-r--r--src/arch/arm/CodeGen.zig13
-rw-r--r--src/arch/arm/Mir.zig2
-rw-r--r--src/arch/riscv64/CodeGen.zig17
-rw-r--r--src/arch/riscv64/Mir.zig2
-rw-r--r--src/arch/sparc64/CodeGen.zig15
-rw-r--r--src/arch/sparc64/Mir.zig2
-rw-r--r--src/arch/wasm/CodeGen.zig16
-rw-r--r--src/arch/wasm/Emit.zig35
-rw-r--r--src/arch/wasm/Mir.zig12
-rw-r--r--src/arch/x86_64/CodeGen.zig11
-rw-r--r--src/arch/x86_64/Emit.zig30
-rw-r--r--src/arch/x86_64/Mir.zig2
-rw-r--r--src/arch/x86_64/abi.zig3
-rw-r--r--src/clang_options_data.zig18
-rw-r--r--src/codegen.zig8
-rw-r--r--src/codegen/c.zig776
-rw-r--r--src/codegen/llvm.zig770
-rw-r--r--src/codegen/llvm/bindings.zig9
-rw-r--r--src/codegen/spirv/Section.zig14
-rw-r--r--src/empty.zig0
-rw-r--r--src/link.zig6
-rw-r--r--src/link/Coff.zig14
-rw-r--r--src/link/Dwarf.zig4
-rw-r--r--src/link/Elf.zig34
-rw-r--r--src/link/MachO.zig630
-rw-r--r--src/link/MachO/CodeSignature.zig89
-rw-r--r--src/link/MachO/DebugSymbols.zig82
-rw-r--r--src/link/MachO/load_commands.zig314
-rw-r--r--src/link/MachO/zld.zig844
-rw-r--r--src/link/Plan9.zig22
-rw-r--r--src/link/SpirV.zig2
-rw-r--r--src/link/Wasm.zig585
-rw-r--r--src/link/Wasm/Archive.zig15
-rw-r--r--src/link/Wasm/Atom.zig75
-rw-r--r--src/link/Wasm/Object.zig8
-rw-r--r--src/link/Wasm/Symbol.zig10
-rw-r--r--src/link/tapi/yaml.zig10
-rw-r--r--src/main.zig42
-rw-r--r--src/mingw.zig1
-rw-r--r--src/print_air.zig25
-rw-r--r--src/print_zir.zig18
-rw-r--r--src/target.zig3
-rw-r--r--src/test.zig6
-rw-r--r--src/translate_c/ast.zig4
-rw-r--r--src/type.zig52
-rw-r--r--src/value.zig499
-rw-r--r--src/zig_llvm.cpp13
-rw-r--r--src/zig_llvm.h2
-rw-r--r--stage1/zig1.wasmbin2314930 -> 2354154 bytes
-rw-r--r--test/behavior.zig16
-rw-r--r--test/behavior/align.zig1
-rw-r--r--test/behavior/alignof.zig1
-rw-r--r--test/behavior/array.zig45
-rw-r--r--test/behavior/asm.zig5
-rw-r--r--test/behavior/atomics.zig166
-rw-r--r--test/behavior/basic.zig36
-rw-r--r--test/behavior/bitcast.zig9
-rw-r--r--test/behavior/bugs/11995.zig34
-rw-r--r--test/behavior/bugs/12000.zig (renamed from test/behavior/bugs/13164.zig)18
-rw-r--r--test/behavior/bugs/12051.zig39
-rw-r--r--test/behavior/bugs/12092.zig28
-rw-r--r--test/behavior/bugs/12119.zig16
-rw-r--r--test/behavior/bugs/12142.zig37
-rw-r--r--test/behavior/bugs/12169.zig20
-rw-r--r--test/behavior/bugs/12450.zig21
-rw-r--r--test/behavior/bugs/12571.zig23
-rw-r--r--test/behavior/bugs/12786.zig2
-rw-r--r--test/behavior/bugs/12794.zig2
-rw-r--r--test/behavior/bugs/12885.zig6
-rw-r--r--test/behavior/bugs/13113.zig21
-rw-r--r--test/behavior/bugs/13366.zig28
-rw-r--r--test/behavior/bugs/13435.zig2
-rw-r--r--test/behavior/bugs/3742.zig2
-rw-r--r--test/behavior/bugs/6456.zig2
-rw-r--r--test/behavior/byval_arg_var.zig1
-rw-r--r--test/behavior/call.zig69
-rw-r--r--test/behavior/cast.zig49
-rw-r--r--test/behavior/empty_file_level_struct.zig1
-rw-r--r--test/behavior/empty_file_level_union.zig1
-rw-r--r--test/behavior/empty_tuple_fields.zig26
-rw-r--r--test/behavior/error.zig51
-rw-r--r--test/behavior/eval.zig124
-rw-r--r--test/behavior/field_parent_ptr.zig2
-rw-r--r--test/behavior/fn.zig65
-rw-r--r--test/behavior/for.zig22
-rw-r--r--test/behavior/generics.zig1
-rw-r--r--test/behavior/if.zig2
-rw-r--r--test/behavior/int128.zig16
-rw-r--r--test/behavior/int_comparison_elision.zig108
-rw-r--r--test/behavior/lower_strlit_to_vector.zig18
-rw-r--r--test/behavior/math.zig452
-rw-r--r--test/behavior/merge_error_sets.zig1
-rw-r--r--test/behavior/null.zig4
-rw-r--r--test/behavior/optional.zig43
-rw-r--r--test/behavior/packed-struct.zig32
-rw-r--r--test/behavior/pointers.zig2
-rw-r--r--test/behavior/ptrcast.zig61
-rw-r--r--test/behavior/reflection.zig8
-rw-r--r--test/behavior/sizeof_and_typeof.zig6
-rw-r--r--test/behavior/slice.zig47
-rw-r--r--test/behavior/struct.zig206
-rw-r--r--test/behavior/struct_contains_slice_of_itself.zig1
-rw-r--r--test/behavior/switch.zig2
-rw-r--r--test/behavior/translate_c_macros.zig4
-rw-r--r--test/behavior/try.zig1
-rw-r--r--test/behavior/tuple.zig75
-rw-r--r--test/behavior/tuple_declarations.zig10
-rw-r--r--test/behavior/type.zig48
-rw-r--r--test/behavior/type_info.zig46
-rw-r--r--test/behavior/undefined.zig2
-rw-r--r--test/behavior/underscore.zig1
-rw-r--r--test/behavior/union.zig38
-rw-r--r--test/behavior/var_args.zig118
-rw-r--r--test/behavior/vector.zig104
-rw-r--r--test/behavior/void.zig1
-rw-r--r--test/behavior/while.zig8
-rw-r--r--test/c_abi/cfuncs.c13
-rw-r--r--test/c_abi/main.zig17
-rw-r--r--test/cases/compile_errors/cImport_with_bogus_include.zig9
-rw-r--r--test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig2
-rw-r--r--test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig21
-rw-r--r--test/cases/compile_errors/comptime_store_in_comptime_switch_in_runtime_if.zig2
-rw-r--r--test/cases/compile_errors/control_flow_uses_comptime_var_at_runtime.zig25
-rw-r--r--test/cases/compile_errors/dereference_anyopaque.zig2
-rw-r--r--test/cases/compile_errors/error_in_comptime_call_in_container_level_initializer.zig22
-rw-r--r--test/cases/compile_errors/generic_instantiation_failure.zig27
-rw-r--r--test/cases/compile_errors/generic_instantiation_failure_in_generic_function_return_type.zig14
-rw-r--r--test/cases/compile_errors/ignored_comptime_value.zig17
-rw-r--r--test/cases/compile_errors/implicit_cast_const_array_to_mutable_slice.zig7
-rw-r--r--test/cases/compile_errors/incompatible sub-byte fields.zig2
-rw-r--r--test/cases/compile_errors/intToEnum_on_non-exhaustive_enums_checks_int_in_range.zig11
-rw-r--r--test/cases/compile_errors/invalid_capture_type.zig24
-rw-r--r--test/cases/compile_errors/invalid_store_to_comptime_field.zig6
-rw-r--r--test/cases/compile_errors/invalid_struct_field.zig16
-rw-r--r--test/cases/compile_errors/invalid_variadic_function.zig12
-rw-r--r--test/cases/compile_errors/load_vector_pointer_with_unknown_runtime_index.zig17
-rw-r--r--test/cases/compile_errors/noalias_on_non_pointer_param.zig6
-rw-r--r--test/cases/compile_errors/packed_struct_field_alignment_unavailable_for_reify_type.zig2
-rw-r--r--test/cases/compile_errors/recursive_inline_fn.zig18
-rw-r--r--test/cases/compile_errors/reference_to_const_data.zig5
-rw-r--r--test/cases/compile_errors/reified_enum_field_value_overflow.zig1
-rw-r--r--test/cases/compile_errors/reify_enum_with_duplicate_field.zig1
-rw-r--r--test/cases/compile_errors/reify_enum_with_duplicate_tag_value.zig1
-rw-r--r--test/cases/compile_errors/reify_struct.zig10
-rw-r--r--test/cases/compile_errors/reify_type.Fn_with_is_generic_true.zig2
-rw-r--r--test/cases/compile_errors/reify_type.Fn_with_is_var_args_true_and_non-C_callconv.zig2
-rw-r--r--test/cases/compile_errors/reify_type.Fn_with_return_type_null.zig2
-rw-r--r--test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig1
-rw-r--r--test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig1
-rw-r--r--test/cases/compile_errors/reify_type_for_tagged_union_with_extra_enum_field.zig7
-rw-r--r--test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig9
-rw-r--r--test/cases/compile_errors/reify_type_for_union_with_opaque_field.zig4
-rw-r--r--test/cases/compile_errors/return_incompatible_generic_struct.zig20
-rw-r--r--test/cases/compile_errors/stage1/obj/invalid_maybe_type.zig9
-rw-r--r--test/cases/compile_errors/stage1/obj/load_vector_pointer_with_unknown_runtime_index.zig17
-rw-r--r--test/cases/compile_errors/stage1/obj/store_vector_pointer_with_unknown_runtime_index.zig16
-rw-r--r--test/cases/compile_errors/store_vector_pointer_with_unknown_runtime_index.zig16
-rw-r--r--test/cases/fn_typeinfo_passed_to_comptime_fn.zig2
-rw-r--r--test/cases/plan9/hello_world_with_updates.1.zig1
-rw-r--r--test/link.zig25
-rw-r--r--test/link/macho/strict_validation/build.zig119
-rw-r--r--test/link/macho/strict_validation/main.zig6
-rw-r--r--test/link/macho/uuid/build.zig62
-rw-r--r--test/link/macho/uuid/test.c2
-rw-r--r--test/link/wasm/bss/build.zig3
-rw-r--r--test/link/wasm/export-data/build.zig39
-rw-r--r--test/link/wasm/export-data/lib.zig2
-rw-r--r--test/link/wasm/export/build.zig48
-rw-r--r--test/link/wasm/export/main.zig1
-rw-r--r--test/link/wasm/extern-mangle/build.zig2
-rw-r--r--test/link/wasm/function-table/build.zig63
-rw-r--r--test/link/wasm/function-table/lib.zig7
-rw-r--r--test/stack_traces.zig134
-rw-r--r--test/standalone.zig9
-rw-r--r--test/standalone/windows_spawn/build.zig16
-rw-r--r--test/standalone/windows_spawn/hello.zig6
-rw-r--r--test/standalone/windows_spawn/main.zig170
-rw-r--r--tools/extract-grammar.zig100
-rw-r--r--tools/gen_stubs.zig2
-rw-r--r--tools/stage2_gdb_pretty_printers.py5
-rw-r--r--tools/update_clang_options.zig8
519 files changed, 15469 insertions, 8832 deletions
diff --git a/.gitattributes b/.gitattributes
index 2983afbfc3..581789817c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,9 +1,9 @@
*.zig text eol=lf
*.txt text eol=lf
langref.html.in text eol=lf
-deps/SoftFloat-3e/*.txt text eol=crlf
+lib/std/compress/testdata/** binary
+lib/std/compress/deflate/testdata/** binary
-deps/** linguist-vendored
lib/include/** linguist-vendored
lib/libc/** linguist-vendored
lib/libcxx/** linguist-vendored
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fa900dbe93..ece221632f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -472,6 +472,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfhf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmod.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmodei4.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmodti4.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivti3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/umodti3.zig"
@@ -585,10 +586,13 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/MachO/DwarfInfo.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Dylib.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Object.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/MachO/Relocation.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Trie.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/ZldAtom.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/bind.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/dead_strip.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/MachO/fat.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/MachO/load_commands.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/thunks.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/zld.zig"
"${CMAKE_SOURCE_DIR}/src/link/Plan9.zig"
@@ -704,19 +708,15 @@ target_link_libraries(zigcpp LINK_PUBLIC
${CMAKE_THREAD_LIBS_INIT}
)
-if(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "AMD64")
+string(TOLOWER "${CMAKE_HOST_SYSTEM_PROCESSOR}" HOST_TARGET_ARCH)
+if(HOST_TARGET_ARCH STREQUAL "amd64")
set(HOST_TARGET_ARCH "x86_64")
-elseif(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "arm64")
+elseif(HOST_TARGET_ARCH STREQUAL "arm64")
set(HOST_TARGET_ARCH "aarch64")
-elseif(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "ARM64")
- set(HOST_TARGET_ARCH "aarch64")
-else()
- string(TOLOWER "${CMAKE_HOST_SYSTEM_PROCESSOR}" HOST_TARGET_ARCH)
endif()
-if(CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin")
+string(TOLOWER "${CMAKE_HOST_SYSTEM_NAME}" HOST_TARGET_OS)
+if(HOST_TARGET_OS STREQUAL "darwin")
set(HOST_TARGET_OS "macos")
-else()
- string(TOLOWER "${CMAKE_HOST_SYSTEM_NAME}" HOST_TARGET_OS)
endif()
set(HOST_TARGET_TRIPLE "${HOST_TARGET_ARCH}-${HOST_TARGET_OS}")
diff --git a/LICENSE b/LICENSE
index 6fda845a81..c8e2b0ff3f 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
The MIT License (Expat)
-Copyright (c) 2015-2022, Zig contributors
+Copyright (c) 2015-2023, Zig contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/build.zig b/build.zig
index f2a7e48dc7..c11329dbf3 100644
--- a/build.zig
+++ b/build.zig
@@ -106,7 +106,7 @@ pub fn build(b: *Builder) !void {
.install_dir = .lib,
.install_subdir = "zig",
.exclude_extensions = &[_][]const u8{
- // exclude files from lib/std/compress/
+ // exclude files from lib/std/compress/testdata
".gz",
".z.0",
".z.9",
@@ -532,6 +532,10 @@ fn addCmakeCfgOptionsToExe(
exe: *std.build.LibExeObjStep,
use_zig_libcxx: bool,
) !void {
+ if (exe.target.isDarwin()) {
+ // useful for package maintainers
+ exe.headerpad_max_install_names = true;
+ }
exe.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
cfg.cmake_binary_dir,
"zigcpp",
@@ -560,24 +564,38 @@ fn addCmakeCfgOptionsToExe(
// System -lc++ must be used because in this code path we are attempting to link
// against system-provided LLVM, Clang, LLD.
- if (exe.target.getOsTag() == .linux) {
- // First we try to link against gcc libstdc++. If that doesn't work, we fall
- // back to -lc++ and cross our fingers.
- addCxxKnownPath(b, cfg, exe, b.fmt("libstdc++.{s}", .{lib_suffix}), "", need_cpp_includes) catch |err| switch (err) {
- error.RequiredLibraryNotFound => {
- exe.linkSystemLibrary("c++");
- },
- else => |e| return e,
- };
- exe.linkSystemLibrary("unwind");
- } else if (exe.target.isFreeBSD()) {
- try addCxxKnownPath(b, cfg, exe, b.fmt("libc++.{s}", .{lib_suffix}), null, need_cpp_includes);
- exe.linkSystemLibrary("pthread");
- } else if (exe.target.getOsTag() == .openbsd) {
- try addCxxKnownPath(b, cfg, exe, b.fmt("libc++.{s}", .{lib_suffix}), null, need_cpp_includes);
- try addCxxKnownPath(b, cfg, exe, b.fmt("libc++abi.{s}", .{lib_suffix}), null, need_cpp_includes);
- } else if (exe.target.isDarwin()) {
- exe.linkSystemLibrary("c++");
+ switch (exe.target.getOsTag()) {
+ .linux => {
+ // First we try to link against gcc libstdc++. If that doesn't work, we fall
+ // back to -lc++ and cross our fingers.
+ addCxxKnownPath(b, cfg, exe, b.fmt("libstdc++.{s}", .{lib_suffix}), "", need_cpp_includes) catch |err| switch (err) {
+ error.RequiredLibraryNotFound => {
+ exe.linkSystemLibrary("c++");
+ },
+ else => |e| return e,
+ };
+ exe.linkSystemLibrary("unwind");
+ },
+ .ios, .macos, .watchos, .tvos => {
+ exe.linkSystemLibrary("c++");
+ },
+ .freebsd => {
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libc++.{s}", .{lib_suffix}), null, need_cpp_includes);
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libgcc_eh.{s}", .{lib_suffix}), null, need_cpp_includes);
+ },
+ .openbsd => {
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libc++.{s}", .{lib_suffix}), null, need_cpp_includes);
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libc++abi.{s}", .{lib_suffix}), null, need_cpp_includes);
+ },
+ .netbsd => {
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libstdc++.{s}", .{lib_suffix}), null, need_cpp_includes);
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libgcc_eh.{s}", .{lib_suffix}), null, need_cpp_includes);
+ },
+ .dragonfly => {
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libstdc++.{s}", .{lib_suffix}), null, need_cpp_includes);
+ try addCxxKnownPath(b, cfg, exe, b.fmt("libgcc_eh.{s}", .{lib_suffix}), null, need_cpp_includes);
+ },
+ else => {},
}
}
diff --git a/ci/aarch64-linux-debug.sh b/ci/aarch64-linux-debug.sh
index 758085c759..ca085d2779 100644
--- a/ci/aarch64-linux-debug.sh
+++ b/ci/aarch64-linux-debug.sh
@@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.534+b0b1cc356"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.971+19056cb68"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
diff --git a/ci/aarch64-linux-release.sh b/ci/aarch64-linux-release.sh
index 59b7d7f9b9..e3dc4530a8 100644
--- a/ci/aarch64-linux-release.sh
+++ b/ci/aarch64-linux-release.sh
@@ -8,9 +8,9 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.534+b0b1cc356"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.971+19056cb68"
PREFIX="$HOME/deps/$CACHE_BASENAME"
-ZIG="$PREFIX/bin/zig"
+ZIG="$PREFIX/bin/zig"
export PATH="$HOME/deps/wasmtime-v2.0.2-$ARCH-linux:$PATH"
diff --git a/ci/aarch64-macos.sh b/ci/aarch64-macos.sh
index 337c2bfa8e..b4533e149f 100755
--- a/ci/aarch64-macos.sh
+++ b/ci/aarch64-macos.sh
@@ -4,7 +4,7 @@ set -x
set -e
# Script assumes the presence of the following:
-# s3cmd
+# s3cmd
ZIGDIR="$(pwd)"
TARGET="$ARCH-macos-none"
@@ -52,3 +52,19 @@ stage3-release/bin/zig build test docs \
# Produce the experimental std lib documentation.
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib
+
+# Ensure that stage3 and stage4 are byte-for-byte identical.
+stage3-release/bin/zig build \
+ --prefix stage4-release \
+ -Denable-llvm \
+ -Dno-lib \
+ -Drelease \
+ -Dstrip \
+ -Dtarget=$TARGET \
+ -Duse-zig-libcxx \
+ -Dversion-string="$(stage3-release/bin/zig version)"
+
+# diff returns an error code if the files differ.
+echo "If the following command fails, it means nondeterminism has been"
+echo "introduced, making stage3 and stage4 no longer byte-for-byte identical."
+diff stage3-release/bin/zig stage4-release/bin/zig
diff --git a/ci/x86_64-linux-debug.sh b/ci/x86_64-linux-debug.sh
index 069dc78657..d3e16a3954 100755
--- a/ci/x86_64-linux-debug.sh
+++ b/ci/x86_64-linux-debug.sh
@@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.448+e6e459e9e"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.971+19056cb68"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
diff --git a/ci/x86_64-linux-release.sh b/ci/x86_64-linux-release.sh
index 06f9e48c66..cec08fae84 100755
--- a/ci/x86_64-linux-release.sh
+++ b/ci/x86_64-linux-release.sh
@@ -8,9 +8,9 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.448+e6e459e9e"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.11.0-dev.971+19056cb68"
PREFIX="$HOME/deps/$CACHE_BASENAME"
-ZIG="$PREFIX/bin/zig"
+ZIG="$PREFIX/bin/zig"
export PATH="$HOME/deps/wasmtime-v2.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-6.1.0.1/bin:$PATH"
diff --git a/ci/x86_64-macos.sh b/ci/x86_64-macos.sh
index f09121ccd0..4ea62bf1e0 100755
--- a/ci/x86_64-macos.sh
+++ b/ci/x86_64-macos.sh
@@ -4,7 +4,7 @@ set -x
set -e
# Script assumes the presence of the following:
-# s3cmd
+# s3cmd
ZIGDIR="$(pwd)"
TARGET="$ARCH-macos-none"
@@ -60,3 +60,19 @@ stage3-release/bin/zig build test docs \
# Produce the experimental std lib documentation.
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib
+
+# Ensure that stage3 and stage4 are byte-for-byte identical.
+stage3-release/bin/zig build \
+ --prefix stage4-release \
+ -Denable-llvm \
+ -Dno-lib \
+ -Drelease \
+ -Dstrip \
+ -Dtarget=$TARGET \
+ -Duse-zig-libcxx \
+ -Dversion-string="$(stage3-release/bin/zig version)"
+
+# diff returns an error code if the files differ.
+echo "If the following command fails, it means nondeterminism has been"
+echo "introduced, making stage3 and stage4 no longer byte-for-byte identical."
+diff stage3-release/bin/zig stage4-release/bin/zig
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 2e235aa80f..b8b7d63ee0 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -300,6 +300,40 @@
color: #fff;
}
}
+
+ @media all {
+ main {
+ counter-reset: section-2;
+ }
+ h2 {
+ counter-reset: section-3;
+ }
+ h2::before {
+ counter-increment: section-2;
+ content: counter(section-2) ". ";
+ font-weight: normal;
+ }
+ h3 {
+ counter-reset: section-4;
+ }
+ h3::before {
+ counter-increment: section-3;
+ content: counter(section-2) "." counter(section-3) ". ";
+ font-weight: normal;
+ }
+ h4::before {
+ counter-increment: section-4;
+ content: counter(section-2) "." counter(section-3) "." counter(section-4) ". ";
+ font-weight: normal;
+ }
+ #zig-version::before {
+ content: "";
+ }
+ #table-of-contents::before {
+ content: "";
+ }
+ }
+ }
</style>
</head>
<body>
@@ -2305,7 +2339,7 @@ comptime {
assert(message.len == 5);
}
-// A string literal is a single-item pointer to an array literal.
+// A string literal is a single-item pointer to an array.
const same_message = "hello";
comptime {
@@ -2399,43 +2433,6 @@ test "array initialization with function calls" {
{#code_end#}
{#see_also|for|Slices#}
- {#header_open|Anonymous List Literals#}
- <p>Similar to {#link|Enum Literals#} and {#link|Anonymous Struct Literals#}
- the type can be omitted from array literals:</p>
- {#code_begin|test|anon_list#}
-const std = @import("std");
-const expect = std.testing.expect;
-
-test "anonymous list literal syntax" {
- var array: [4]u8 = .{11, 22, 33, 44};
- try expect(array[0] == 11);
- try expect(array[1] == 22);
- try expect(array[2] == 33);
- try expect(array[3] == 44);
-}
- {#code_end#}
- <p>
- If there is no type in the {#link|result location|Result Location Semantics#} then an
- anonymous list literal actually turns into a {#link|struct#} with numbered field names:
- </p>
- {#code_begin|test|infer_list_literal#}
-const std = @import("std");
-const expect = std.testing.expect;
-
-test "fully anonymous list literal" {
- try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi"});
-}
-
-fn dump(args: anytype) !void {
- try expect(args.@"0" == 1234);
- try expect(args.@"1" == 12.34);
- try expect(args.@"2");
- try expect(args.@"3"[0] == 'h');
- try expect(args.@"3"[1] == 'i');
-}
- {#code_end#}
- {#header_close#}
-
{#header_open|Multidimensional Arrays#}
<p>
Multidimensional arrays can be created by nesting arrays:
@@ -3544,15 +3541,21 @@ fn dump(args: anytype) !void {
try expect(args.s[1] == 'i');
}
{#code_end#}
+ {#header_close#}
+
+ {#header_open|Tuples#}
<p>
Anonymous structs can be created without specifying field names, and are referred to as "tuples".
</p>
<p>
The fields are implicitly named using numbers starting from 0. Because their names are integers,
- the {#syntax#}@"0"{#endsyntax#} syntax must be used to access them. Names inside {#syntax#}@""{#endsyntax#} are always recognised as {#link|identifiers|Identifiers#}.
+ they cannot be accessed with {#syntax#}.{#endsyntax#} syntax without also wrapping them in
+ {#syntax#}@""{#endsyntax#}. Names inside {#syntax#}@""{#endsyntax#} are always recognised as
+ {#link|identifiers|Identifiers#}.
</p>
<p>
- Like arrays, tuples have a .len field, can be indexed and work with the ++ and ** operators. They can also be iterated over with {#link|inline for#}.
+ Like arrays, tuples have a .len field, can be indexed (provided the index is comptime-known)
+ and work with the ++ and ** operators. They can also be iterated over with {#link|inline for#}.
</p>
{#code_begin|test|tuple#}
const std = @import("std");
@@ -3599,9 +3602,8 @@ const Value = enum(u2) {
one,
two,
};
-
// Now you can cast between u2 and Value.
-// The ordinal value starts from 0, counting up for each member.
+// The ordinal value starts from 0, counting up by 1 from the previous member.
test "enum ordinal value" {
try expect(@enumToInt(Value.zero) == 0);
try expect(@enumToInt(Value.one) == 1);
@@ -3620,6 +3622,22 @@ test "set enum ordinal value" {
try expect(@enumToInt(Value2.million) == 1000000);
}
+// You can also override only some values.
+const Value3 = enum(u4) {
+ a,
+ b = 8,
+ c,
+ d = 4,
+ e,
+};
+test "enum implicit ordinal values and overridden values" {
+ try expect(@enumToInt(Value3.a) == 0);
+ try expect(@enumToInt(Value3.b) == 8);
+ try expect(@enumToInt(Value3.c) == 9);
+ try expect(@enumToInt(Value3.d) == 4);
+ try expect(@enumToInt(Value3.e) == 5);
+}
+
// Enums can have methods, the same as structs and unions.
// Enum methods are not special, they are only namespaced
// functions that you can call with dot syntax.
@@ -4253,7 +4271,7 @@ fn isFieldOptional(comptime T: type, field_index: usize) !bool {
return switch (field_index) {
// This prong is analyzed `fields.len - 1` times with `idx` being an
// unique comptime-known value each time.
- inline 0...fields.len - 1 => |idx| @typeInfo(fields[idx].field_type) == .Optional,
+ inline 0...fields.len - 1 => |idx| @typeInfo(fields[idx].type) == .Optional,
else => return error.IndexOutOfBounds,
};
}
@@ -5242,7 +5260,7 @@ const math = std.math;
const testing = std.testing;
test "fn reflection" {
- try testing.expect(@typeInfo(@TypeOf(testing.expect)).Fn.args[0].arg_type.? == bool);
+ try testing.expect(@typeInfo(@TypeOf(testing.expect)).Fn.params[0].type.? == bool);
try testing.expect(@typeInfo(@TypeOf(testing.tmpDir)).Fn.return_type.? == testing.TmpDir);
try testing.expect(@typeInfo(@TypeOf(math.Log2Int)).Fn.is_generic);
@@ -5364,14 +5382,14 @@ pub fn parseU64(buf: []const u8, radix: u8) !u64 {
}
// x *= radix
- if (@mulWithOverflow(u64, x, radix, &x)) {
- return error.Overflow;
- }
+ var ov = @mulWithOverflow(x, radix);
+    if (ov[1] != 0) return error.Overflow;
+
// x += digit
- if (@addWithOverflow(u64, x, digit, &x)) {
- return error.Overflow;
- }
+ ov = @addWithOverflow(ov[0], digit);
+    if (ov[1] != 0) return error.Overflow;
+ x = ov[0];
}
return x;
@@ -5783,14 +5801,16 @@ test "merge error sets" {
{#code_begin|test|inferred_error_sets#}
// With an inferred error set
pub fn add_inferred(comptime T: type, a: T, b: T) !T {
- var answer: T = undefined;
- return if (@addWithOverflow(T, a, b, &answer)) error.Overflow else answer;
+ const ov = @addWithOverflow(a, b);
+ if (ov[1] != 0) return error.Overflow;
+ return ov[0];
}
// With an explicit error set
pub fn add_explicit(comptime T: type, a: T, b: T) Error!T {
- var answer: T = undefined;
- return if (@addWithOverflow(T, a, b, &answer)) error.Overflow else answer;
+ const ov = @addWithOverflow(a, b);
+ if (ov[1] != 0) return error.Overflow;
+ return ov[0];
}
const Error = error {
@@ -6437,7 +6457,22 @@ test "coercion between unions and enums" {
{#see_also|union|enum#}
{#header_close#}
{#header_open|Type Coercion: undefined#}
- <p>{#link|undefined#} can be cast to any type.</p>
+ <p>{#link|undefined#} can be coerced to any type.</p>
+ {#header_close#}
+
+ {#header_open|Type Coercion: tuples to arrays#}
+ <p>{#link|Tuples#} can be coerced to arrays, if all of the fields have the same type.</p>
+ {#code_begin|test|test_coerce_tuples_arrays#}
+const std = @import("std");
+const expect = std.testing.expect;
+
+const Tuple = struct{ u8, u8 };
+test "coercion from homogenous tuple to array" {
+ const tuple: Tuple = .{5, 6};
+ const array: [2]u8 = tuple;
+ _ = array;
+}
+ {#code_end#}
{#header_close#}
{#header_close#}
@@ -7583,11 +7618,9 @@ test "global assembly" {
</p>
{#header_close#}
{#header_open|@addWithOverflow#}
- <pre>{#syntax#}@addWithOverflow(comptime T: type, a: T, b: T, result: *T) bool{#endsyntax#}</pre>
+ <pre>{#syntax#}@addWithOverflow(a: anytype, b: anytype) struct { @TypeOf(a, b), u1 }{#endsyntax#}</pre>
<p>
- Performs {#syntax#}result.* = a + b{#endsyntax#}. If overflow or underflow occurs,
- stores the overflowed bits in {#syntax#}result{#endsyntax#} and returns {#syntax#}true{#endsyntax#}.
- If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
+ Performs {#syntax#}a + b{#endsyntax#} and returns a tuple with the result and a possible overflow bit.
</p>
{#header_close#}
{#header_open|@alignCast#}
@@ -8088,6 +8121,35 @@ test "main" {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#}
{#header_close#}
+ {#header_open|@cVaArg#}
+ <pre>{#syntax#}@cVaArg(operand: *std.builtin.VaList, comptime T: type) T{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_arg{#endsyntax#}.
+ </p>
+ {#see_also|@cVaCopy|@cVaEnd|@cVaStart#}
+ {#header_close#}
+ {#header_open|@cVaCopy#}
+ <pre>{#syntax#}@cVaCopy(src: *std.builtin.VaList) std.builtin.VaList{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_copy{#endsyntax#}.
+ </p>
+ {#see_also|@cVaArg|@cVaEnd|@cVaStart#}
+ {#header_close#}
+ {#header_open|@cVaEnd#}
+ <pre>{#syntax#}@cVaEnd(src: *std.builtin.VaList) void{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_end{#endsyntax#}.
+ </p>
+ {#see_also|@cVaArg|@cVaCopy|@cVaStart#}
+ {#header_close#}
+ {#header_open|@cVaStart#}
+ <pre>{#syntax#}@cVaStart() std.builtin.VaList{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_start{#endsyntax#}. Only valid inside a variadic function.
+ </p>
+ {#see_also|@cVaArg|@cVaCopy|@cVaEnd#}
+ {#header_close#}
+
{#header_open|@divExact#}
<pre>{#syntax#}@divExact(numerator: T, denominator: T) T{#endsyntax#}</pre>
<p>
@@ -8617,11 +8679,9 @@ test "@wasmMemoryGrow" {
{#header_close#}
{#header_open|@mulWithOverflow#}
- <pre>{#syntax#}@mulWithOverflow(comptime T: type, a: T, b: T, result: *T) bool{#endsyntax#}</pre>
+ <pre>{#syntax#}@mulWithOverflow(a: anytype, b: anytype) struct { @TypeOf(a, b), u1 }{#endsyntax#}</pre>
<p>
- Performs {#syntax#}result.* = a * b{#endsyntax#}. If overflow or underflow occurs,
- stores the overflowed bits in {#syntax#}result{#endsyntax#} and returns {#syntax#}true{#endsyntax#}.
- If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
+ Performs {#syntax#}a * b{#endsyntax#} and returns a tuple with the result and a possible overflow bit.
</p>
{#header_close#}
@@ -8895,15 +8955,13 @@ test "@setRuntimeSafety" {
{#header_close#}
{#header_open|@shlWithOverflow#}
- <pre>{#syntax#}@shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: *T) bool{#endsyntax#}</pre>
+ <pre>{#syntax#}@shlWithOverflow(a: anytype, shift_amt: Log2T) struct { @TypeOf(a), u1 }{#endsyntax#}</pre>
<p>
- Performs {#syntax#}result.* = a << b{#endsyntax#}. If overflow or underflow occurs,
- stores the overflowed bits in {#syntax#}result{#endsyntax#} and returns {#syntax#}true{#endsyntax#}.
- If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
+ Performs {#syntax#}a << b{#endsyntax#} and returns a tuple with the result and a possible overflow bit.
</p>
<p>
- The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).Int.bits){#endsyntax#} bits.
- This is because {#syntax#}shift_amt >= @typeInfo(T).Int.bits{#endsyntax#} is undefined behavior.
+ The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(@TypeOf(a)).Int.bits){#endsyntax#} bits.
+ This is because {#syntax#}shift_amt >= @typeInfo(@TypeOf(a)).Int.bits{#endsyntax#} is undefined behavior.
</p>
{#see_also|@shlExact|@shrExact#}
{#header_close#}
@@ -9245,11 +9303,9 @@ fn doTheTest() !void {
{#header_close#}
{#header_open|@subWithOverflow#}
- <pre>{#syntax#}@subWithOverflow(comptime T: type, a: T, b: T, result: *T) bool{#endsyntax#}</pre>
+ <pre>{#syntax#}@subWithOverflow(a: anytype, b: anytype) struct { @TypeOf(a, b), u1 }{#endsyntax#}</pre>
<p>
- Performs {#syntax#}result.* = a - b{#endsyntax#}. If overflow or underflow occurs,
- stores the overflowed bits in {#syntax#}result{#endsyntax#} and returns {#syntax#}true{#endsyntax#}.
- If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
+ Performs {#syntax#}a - b{#endsyntax#} and returns a tuple with the result and a possible overflow bit.
</p>
{#header_close#}
@@ -9696,11 +9752,11 @@ const print = @import("std").debug.print;
pub fn main() void {
var byte: u8 = 255;
- var result: u8 = undefined;
- if (@addWithOverflow(u8, byte, 10, &result)) {
- print("overflowed result: {}\n", .{result});
+ const ov = @addWithOverflow(byte, 10);
+ if (ov[1] != 0) {
+ print("overflowed result: {}\n", .{ov[0]});
} else {
- print("result: {}\n", .{result});
+ print("result: {}\n", .{ov[0]});
}
}
{#code_end#}
@@ -10802,14 +10858,32 @@ test "variadic function" {
}
{#code_end#}
<p>
- Non extern variadic functions are currently not implemented, but there
- is an accepted proposal. See <a href="https://github.com/ziglang/zig/issues/515">#515</a>.
+      Variadic functions can be implemented using {#link|@cVaStart#}, {#link|@cVaEnd#}, {#link|@cVaArg#} and {#link|@cVaCopy#}.
</p>
- {#code_begin|obj_err|non-extern function is variadic#}
-export fn printf(format: [*:0]const u8, ...) c_int {
- _ = format;
+ {#code_begin|test|defining_variadic_function#}
+const std = @import("std");
+const testing = std.testing;
+const builtin = @import("builtin");
- return 0;
+fn add(count: c_int, ...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ var i: usize = 0;
+ var sum: c_int = 0;
+ while (i < count) : (i += 1) {
+ sum += @cVaArg(&ap, c_int);
+ }
+ return sum;
+}
+
+test "defining a variadic function" {
+ // Variadic functions are currently disabled on some targets due to miscompilations.
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .windows and builtin.os.tag != .macos) return error.SkipZigTest;
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest;
+
+ try std.testing.expectEqual(@as(c_int, 0), add(0));
+ try std.testing.expectEqual(@as(c_int, 1), add(1, @as(c_int, 1)));
+ try std.testing.expectEqual(@as(c_int, 3), add(2, @as(c_int, 1), @as(c_int, 2)));
}
{#code_end#}
{#header_close#}
@@ -11981,24 +12055,26 @@ ContainerMembers <- ContainerDeclarations (ContainerField COMMA)* (ContainerFiel
ContainerDeclarations
<- TestDecl ContainerDeclarations
- / TopLevelComptime ContainerDeclarations
- / doc_comment? KEYWORD_pub? TopLevelDecl ContainerDeclarations
+ / ComptimeDecl ContainerDeclarations
+ / doc_comment? KEYWORD_pub? Decl ContainerDeclarations
/
-TestDecl <- doc_comment? KEYWORD_test STRINGLITERALSINGLE? Block
+TestDecl <- KEYWORD_test (STRINGLITERALSINGLE / IDENTIFIER)? Block
-TopLevelComptime <- doc_comment? KEYWORD_comptime BlockExpr
+ComptimeDecl <- KEYWORD_comptime Block
-TopLevelDecl
+Decl
<- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block)
/ (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? VarDecl
/ KEYWORD_usingnamespace Expr SEMICOLON
-FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
+FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
-VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? LinkSection? (EQUAL Expr)? SEMICOLON
+VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? AddrSpace? LinkSection? (EQUAL Expr)? SEMICOLON
-ContainerField <- doc_comment? KEYWORD_comptime? IDENTIFIER (COLON (KEYWORD_anytype / TypeExpr) ByteAlign?)? (EQUAL Expr)?
+ContainerField
+ <- doc_comment? KEYWORD_comptime? IDENTIFIER (COLON TypeExpr)? ByteAlign? (EQUAL Expr)?
+ / doc_comment? KEYWORD_comptime? (IDENTIFIER COLON)? !KEYWORD_fn TypeExpr ByteAlign? (EQUAL Expr)?
# *** Block Level ***
Statement
@@ -12159,6 +12235,8 @@ WhileContinueExpr <- COLON LPAREN AssignExpr RPAREN
LinkSection <- KEYWORD_linksection LPAREN Expr RPAREN
+AddrSpace <- KEYWORD_addrspace LPAREN Expr RPAREN
+
# Fn specific
CallConv <- KEYWORD_callconv LPAREN Expr RPAREN
@@ -12186,7 +12264,7 @@ PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
# Switch specific
-SwitchProng <- SwitchCase EQUALRARROW PtrPayload? AssignExpr
+SwitchProng <- KEYWORD_inline? SwitchCase EQUALRARROW PtrIndexPayload? AssignExpr
SwitchCase
<- SwitchItem (COMMA SwitchItem)* COMMA?
@@ -12197,11 +12275,15 @@ SwitchItem <- Expr (DOT3 Expr)?
# Operators
AssignOp
<- ASTERISKEQUAL
+ / ASTERISKPIPEEQUAL
/ SLASHEQUAL
/ PERCENTEQUAL
/ PLUSEQUAL
+ / PLUSPIPEEQUAL
/ MINUSEQUAL
+ / MINUSPIPEEQUAL
/ LARROW2EQUAL
+ / LARROW2PIPEEQUAL
/ RARROW2EQUAL
/ AMPERSANDEQUAL
/ CARETEQUAL
@@ -12229,6 +12311,7 @@ BitwiseOp
BitShiftOp
<- LARROW2
/ RARROW2
+ / LARROW2PIPE
AdditionOp
<- PLUS
@@ -12236,6 +12319,8 @@ AdditionOp
/ PLUS2
/ PLUSPERCENT
/ MINUSPERCENT
+ / PLUSPIPE
+ / MINUSPIPE
MultiplyOp
<- PIPE2
@@ -12244,6 +12329,7 @@ MultiplyOp
/ PERCENT
/ ASTERISK2
/ ASTERISKPERCENT
+ / ASTERISKPIPE
PrefixOp
<- EXCLAMATIONMARK
@@ -12257,8 +12343,8 @@ PrefixOp
PrefixTypeOp
<- QUESTIONMARK
/ KEYWORD_anyframe MINUSRARROW
- / SliceTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
- / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+ / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+ / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON Expr COLON Expr)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/ ArrayTypeStart
SuffixOp
@@ -12283,7 +12369,7 @@ ArrayTypeStart <- LBRACKET Expr (COLON Expr)? RBRACKET
ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
ContainerDeclType
- <- KEYWORD_struct
+ <- KEYWORD_struct (LPAREN Expr RPAREN)?
/ KEYWORD_opaque
/ KEYWORD_enum (LPAREN Expr RPAREN)?
/ KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
@@ -12373,8 +12459,8 @@ string_char
<- char_escape
/ [^\\"\n]
-container_doc_comment <- ('//!' [^\n]* [ \n]*)+
-doc_comment <- ('///' [^\n]* [ \n]*)+
+container_doc_comment <- ('//!' [^\n]* [ \n]* skip)+
+doc_comment <- ('///' [^\n]* [ \n]* skip)+
line_comment <- '//' ![!/][^\n]* / '////' [^\n]*
line_string <- ("\\\\" [^\n]* [ \n]*)+
skip <- ([ \n] / line_comment)*
@@ -12402,11 +12488,13 @@ BUILTINIDENTIFIER <- "@"[A-Za-z_][A-Za-z0-9_]* skip
AMPERSAND <- '&' ![=] skip
AMPERSANDEQUAL <- '&=' skip
-ASTERISK <- '*' ![*%=] skip
+ASTERISK <- '*' ![*%=|] skip
ASTERISK2 <- '**' skip
ASTERISKEQUAL <- '*=' skip
ASTERISKPERCENT <- '*%' ![=] skip
ASTERISKPERCENTEQUAL <- '*%=' skip
+ASTERISKPIPE <- '*|' ![=] skip
+ASTERISKPIPEEQUAL <- '*|=' skip
CARET <- '^' ![=] skip
CARETEQUAL <- '^=' skip
COLON <- ':' skip
@@ -12422,27 +12510,33 @@ EQUALRARROW <- '=>' skip
EXCLAMATIONMARK <- '!' ![=] skip
EXCLAMATIONMARKEQUAL <- '!=' skip
LARROW <- '<' ![<=] skip
-LARROW2 <- '<<' ![=] skip
+LARROW2 <- '<<' ![=|] skip
LARROW2EQUAL <- '<<=' skip
+LARROW2PIPE <- '<<|' ![=] skip
+LARROW2PIPEEQUAL <- '<<|=' skip
LARROWEQUAL <- '<=' skip
LBRACE <- '{' skip
LBRACKET <- '[' skip
LPAREN <- '(' skip
-MINUS <- '-' ![%=>] skip
+MINUS <- '-' ![%=>|] skip
MINUSEQUAL <- '-=' skip
MINUSPERCENT <- '-%' ![=] skip
MINUSPERCENTEQUAL <- '-%=' skip
+MINUSPIPE <- '-|' ![=] skip
+MINUSPIPEEQUAL <- '-|=' skip
MINUSRARROW <- '->' skip
PERCENT <- '%' ![=] skip
PERCENTEQUAL <- '%=' skip
PIPE <- '|' ![|=] skip
PIPE2 <- '||' skip
PIPEEQUAL <- '|=' skip
-PLUS <- '+' ![%+=] skip
+PLUS <- '+' ![%+=|] skip
PLUS2 <- '++' skip
PLUSEQUAL <- '+=' skip
PLUSPERCENT <- '+%' ![=] skip
PLUSPERCENTEQUAL <- '+%=' skip
+PLUSPIPE <- '+|' ![=] skip
+PLUSPIPEEQUAL <- '+|=' skip
LETTERC <- 'c' skip
QUESTIONMARK <- '?' skip
RARROW <- '>' ![>=] skip
@@ -12458,6 +12552,7 @@ SLASHEQUAL <- '/=' skip
TILDE <- '~' skip
end_of_word <- ![a-zA-Z0-9_] skip
+KEYWORD_addrspace <- 'addrspace' end_of_word
KEYWORD_align <- 'align' end_of_word
KEYWORD_allowzero <- 'allowzero' end_of_word
KEYWORD_and <- 'and' end_of_word
@@ -12507,11 +12602,11 @@ KEYWORD_var <- 'var' end_of_word
KEYWORD_volatile <- 'volatile' end_of_word
KEYWORD_while <- 'while' end_of_word
-keyword <- KEYWORD_align / KEYWORD_allowzero / KEYWORD_and / KEYWORD_anyframe
- / KEYWORD_anytype / KEYWORD_asm / KEYWORD_async / KEYWORD_await
- / KEYWORD_break / KEYWORD_callconv / KEYWORD_catch / KEYWORD_comptime
- / KEYWORD_const / KEYWORD_continue / KEYWORD_defer / KEYWORD_else
- / KEYWORD_enum / KEYWORD_errdefer / KEYWORD_error / KEYWORD_export
+keyword <- KEYWORD_addrspace / KEYWORD_align / KEYWORD_allowzero / KEYWORD_and
+ / KEYWORD_anyframe / KEYWORD_anytype / KEYWORD_asm / KEYWORD_async
+ / KEYWORD_await / KEYWORD_break / KEYWORD_callconv / KEYWORD_catch
+ / KEYWORD_comptime / KEYWORD_const / KEYWORD_continue / KEYWORD_defer
+ / KEYWORD_else / KEYWORD_enum / KEYWORD_errdefer / KEYWORD_error / KEYWORD_export
/ KEYWORD_extern / KEYWORD_fn / KEYWORD_for / KEYWORD_if
/ KEYWORD_inline / KEYWORD_noalias / KEYWORD_nosuspend / KEYWORD_noinline
/ KEYWORD_opaque / KEYWORD_or / KEYWORD_orelse / KEYWORD_packed
diff --git a/lib/compiler_rt.zig b/lib/compiler_rt.zig
index 5cce3daf29..96fdc0d4ab 100644
--- a/lib/compiler_rt.zig
+++ b/lib/compiler_rt.zig
@@ -112,6 +112,7 @@ comptime {
_ = @import("compiler_rt/modti3.zig");
_ = @import("compiler_rt/multi3.zig");
_ = @import("compiler_rt/udivti3.zig");
+ _ = @import("compiler_rt/udivmodei4.zig");
_ = @import("compiler_rt/udivmodti4.zig");
_ = @import("compiler_rt/umodti3.zig");
diff --git a/lib/compiler_rt/absvdi2.zig b/lib/compiler_rt/absvdi2.zig
index 7ebf561ae5..14cd1d8393 100644
--- a/lib/compiler_rt/absvdi2.zig
+++ b/lib/compiler_rt/absvdi2.zig
@@ -4,7 +4,7 @@ const absv = @import("./absv.zig").absv;
pub const panic = common.panic;
comptime {
- @export(__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage });
+ @export(__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __absvdi2(a: i64) callconv(.C) i64 {
diff --git a/lib/compiler_rt/absvsi2.zig b/lib/compiler_rt/absvsi2.zig
index 664925f8f9..9ca9297c56 100644
--- a/lib/compiler_rt/absvsi2.zig
+++ b/lib/compiler_rt/absvsi2.zig
@@ -4,7 +4,7 @@ const absv = @import("./absv.zig").absv;
pub const panic = common.panic;
comptime {
- @export(__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage });
+ @export(__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __absvsi2(a: i32) callconv(.C) i32 {
diff --git a/lib/compiler_rt/absvti2.zig b/lib/compiler_rt/absvti2.zig
index f7d0f796b0..f866f4c2c1 100644
--- a/lib/compiler_rt/absvti2.zig
+++ b/lib/compiler_rt/absvti2.zig
@@ -4,7 +4,7 @@ const absv = @import("./absv.zig").absv;
pub const panic = common.panic;
comptime {
- @export(__absvti2, .{ .name = "__absvti2", .linkage = common.linkage });
+ @export(__absvti2, .{ .name = "__absvti2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __absvti2(a: i128) callconv(.C) i128 {
diff --git a/lib/compiler_rt/adddf3.zig b/lib/compiler_rt/adddf3.zig
index 1b511f78a4..10ae9c328a 100644
--- a/lib/compiler_rt/adddf3.zig
+++ b/lib/compiler_rt/adddf3.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = common.linkage });
+ @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__adddf3, .{ .name = "__adddf3", .linkage = common.linkage });
+ @export(__adddf3, .{ .name = "__adddf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/addhf3.zig b/lib/compiler_rt/addhf3.zig
index 12086aef38..013c2ea714 100644
--- a/lib/compiler_rt/addhf3.zig
+++ b/lib/compiler_rt/addhf3.zig
@@ -4,7 +4,7 @@ const addf3 = @import("./addf3.zig").addf3;
pub const panic = common.panic;
comptime {
- @export(__addhf3, .{ .name = "__addhf3", .linkage = common.linkage });
+ @export(__addhf3, .{ .name = "__addhf3", .linkage = common.linkage, .visibility = common.visibility });
}
fn __addhf3(a: f16, b: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/addo.zig b/lib/compiler_rt/addo.zig
index d14fe36710..5248dfb8b8 100644
--- a/lib/compiler_rt/addo.zig
+++ b/lib/compiler_rt/addo.zig
@@ -1,13 +1,13 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+const common = @import("./common.zig");
pub const panic = @import("common.zig").panic;
comptime {
- @export(__addosi4, .{ .name = "__addosi4", .linkage = linkage });
- @export(__addodi4, .{ .name = "__addodi4", .linkage = linkage });
- @export(__addoti4, .{ .name = "__addoti4", .linkage = linkage });
+ @export(__addosi4, .{ .name = "__addosi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__addodi4, .{ .name = "__addodi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__addoti4, .{ .name = "__addoti4", .linkage = common.linkage, .visibility = common.visibility });
}
// addo - add overflow
diff --git a/lib/compiler_rt/addsf3.zig b/lib/compiler_rt/addsf3.zig
index 83f8285371..59e96da432 100644
--- a/lib/compiler_rt/addsf3.zig
+++ b/lib/compiler_rt/addsf3.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = common.linkage });
+ @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__addsf3, .{ .name = "__addsf3", .linkage = common.linkage });
+ @export(__addsf3, .{ .name = "__addsf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/addtf3.zig b/lib/compiler_rt/addtf3.zig
index 15c450e193..e9a077fad5 100644
--- a/lib/compiler_rt/addtf3.zig
+++ b/lib/compiler_rt/addtf3.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__addtf3, .{ .name = "__addkf3", .linkage = common.linkage });
+ @export(__addtf3, .{ .name = "__addkf3", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_add, .{ .name = "_Qp_add", .linkage = common.linkage });
+ @export(_Qp_add, .{ .name = "_Qp_add", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__addtf3, .{ .name = "__addtf3", .linkage = common.linkage });
+ @export(__addtf3, .{ .name = "__addtf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
diff --git a/lib/compiler_rt/addxf3.zig b/lib/compiler_rt/addxf3.zig
index 72cf955632..babb11ab80 100644
--- a/lib/compiler_rt/addxf3.zig
+++ b/lib/compiler_rt/addxf3.zig
@@ -4,7 +4,7 @@ const addf3 = @import("./addf3.zig").addf3;
pub const panic = common.panic;
comptime {
- @export(__addxf3, .{ .name = "__addxf3", .linkage = common.linkage });
+ @export(__addxf3, .{ .name = "__addxf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
diff --git a/lib/compiler_rt/arm.zig b/lib/compiler_rt/arm.zig
index 145d3992f7..27667d3b60 100644
--- a/lib/compiler_rt/arm.zig
+++ b/lib/compiler_rt/arm.zig
@@ -9,34 +9,34 @@ pub const panic = common.panic;
comptime {
if (!builtin.is_test) {
if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = common.linkage });
- @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = common.linkage });
- @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = common.linkage });
+ @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = common.linkage, .visibility = common.visibility });
- @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = common.linkage });
- @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = common.linkage });
+ @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = common.linkage, .visibility = common.visibility });
- @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = common.linkage });
- @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = common.linkage });
+ @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = common.linkage, .visibility = common.visibility });
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = common.linkage });
- @export(__aeabi_memcpy4, .{ .name = "__aeabi_memcpy4", .linkage = common.linkage });
- @export(__aeabi_memcpy8, .{ .name = "__aeabi_memcpy8", .linkage = common.linkage });
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memcpy4, .{ .name = "__aeabi_memcpy4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memcpy8, .{ .name = "__aeabi_memcpy8", .linkage = common.linkage, .visibility = common.visibility });
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = common.linkage });
- @export(__aeabi_memmove4, .{ .name = "__aeabi_memmove4", .linkage = common.linkage });
- @export(__aeabi_memmove8, .{ .name = "__aeabi_memmove8", .linkage = common.linkage });
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memmove4, .{ .name = "__aeabi_memmove4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memmove8, .{ .name = "__aeabi_memmove8", .linkage = common.linkage, .visibility = common.visibility });
- @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = common.linkage });
- @export(__aeabi_memset4, .{ .name = "__aeabi_memset4", .linkage = common.linkage });
- @export(__aeabi_memset8, .{ .name = "__aeabi_memset8", .linkage = common.linkage });
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memset4, .{ .name = "__aeabi_memset4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memset8, .{ .name = "__aeabi_memset8", .linkage = common.linkage, .visibility = common.visibility });
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = common.linkage });
- @export(__aeabi_memclr4, .{ .name = "__aeabi_memclr4", .linkage = common.linkage });
- @export(__aeabi_memclr8, .{ .name = "__aeabi_memclr8", .linkage = common.linkage });
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memclr4, .{ .name = "__aeabi_memclr4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_memclr8, .{ .name = "__aeabi_memclr8", .linkage = common.linkage, .visibility = common.visibility });
if (builtin.os.tag == .linux) {
- @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = common.linkage });
+ @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = common.linkage, .visibility = common.visibility });
}
}
}
diff --git a/lib/compiler_rt/atomics.zig b/lib/compiler_rt/atomics.zig
index 13ca3a7197..73bf3fa03b 100644
--- a/lib/compiler_rt/atomics.zig
+++ b/lib/compiler_rt/atomics.zig
@@ -1,9 +1,11 @@
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("./common.zig");
const cpu = builtin.cpu;
const arch = cpu.arch;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
+const linkage = common.linkage;
+const visibility = common.visibility;
+pub const panic = common.panic;
// This parameter is true iff the target architecture supports the bare minimum
// to implement the atomic load/store intrinsics.
@@ -454,59 +456,59 @@ fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
comptime {
if (supports_atomic_ops and builtin.object_format != .c) {
- @export(__atomic_load, .{ .name = "__atomic_load", .linkage = linkage });
- @export(__atomic_store, .{ .name = "__atomic_store", .linkage = linkage });
- @export(__atomic_exchange, .{ .name = "__atomic_exchange", .linkage = linkage });
- @export(__atomic_compare_exchange, .{ .name = "__atomic_compare_exchange", .linkage = linkage });
-
- @export(__atomic_fetch_add_1, .{ .name = "__atomic_fetch_add_1", .linkage = linkage });
- @export(__atomic_fetch_add_2, .{ .name = "__atomic_fetch_add_2", .linkage = linkage });
- @export(__atomic_fetch_add_4, .{ .name = "__atomic_fetch_add_4", .linkage = linkage });
- @export(__atomic_fetch_add_8, .{ .name = "__atomic_fetch_add_8", .linkage = linkage });
-
- @export(__atomic_fetch_sub_1, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage });
- @export(__atomic_fetch_sub_2, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage });
- @export(__atomic_fetch_sub_4, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage });
- @export(__atomic_fetch_sub_8, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage });
-
- @export(__atomic_fetch_and_1, .{ .name = "__atomic_fetch_and_1", .linkage = linkage });
- @export(__atomic_fetch_and_2, .{ .name = "__atomic_fetch_and_2", .linkage = linkage });
- @export(__atomic_fetch_and_4, .{ .name = "__atomic_fetch_and_4", .linkage = linkage });
- @export(__atomic_fetch_and_8, .{ .name = "__atomic_fetch_and_8", .linkage = linkage });
-
- @export(__atomic_fetch_or_1, .{ .name = "__atomic_fetch_or_1", .linkage = linkage });
- @export(__atomic_fetch_or_2, .{ .name = "__atomic_fetch_or_2", .linkage = linkage });
- @export(__atomic_fetch_or_4, .{ .name = "__atomic_fetch_or_4", .linkage = linkage });
- @export(__atomic_fetch_or_8, .{ .name = "__atomic_fetch_or_8", .linkage = linkage });
-
- @export(__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage });
- @export(__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage });
- @export(__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage });
- @export(__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage });
-
- @export(__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage });
- @export(__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage });
- @export(__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage });
- @export(__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage });
-
- @export(__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage });
- @export(__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage });
- @export(__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage });
- @export(__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage });
-
- @export(__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage });
- @export(__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage });
- @export(__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage });
- @export(__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage });
-
- @export(__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage });
- @export(__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage });
- @export(__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage });
- @export(__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage });
-
- @export(__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage });
- @export(__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage });
- @export(__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage });
- @export(__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage });
+ @export(__atomic_load, .{ .name = "__atomic_load", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_store, .{ .name = "__atomic_store", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_exchange, .{ .name = "__atomic_exchange", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_compare_exchange, .{ .name = "__atomic_compare_exchange", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_fetch_add_1, .{ .name = "__atomic_fetch_add_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_add_2, .{ .name = "__atomic_fetch_add_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_add_4, .{ .name = "__atomic_fetch_add_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_add_8, .{ .name = "__atomic_fetch_add_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_fetch_sub_1, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_sub_2, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_sub_4, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_sub_8, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_fetch_and_1, .{ .name = "__atomic_fetch_and_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_and_2, .{ .name = "__atomic_fetch_and_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_and_4, .{ .name = "__atomic_fetch_and_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_and_8, .{ .name = "__atomic_fetch_and_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_fetch_or_1, .{ .name = "__atomic_fetch_or_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_or_2, .{ .name = "__atomic_fetch_or_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_or_4, .{ .name = "__atomic_fetch_or_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_or_8, .{ .name = "__atomic_fetch_or_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage, .visibility = visibility });
+
+ @export(__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage, .visibility = visibility });
+ @export(__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage, .visibility = visibility });
}
}
diff --git a/lib/compiler_rt/aulldiv.zig b/lib/compiler_rt/aulldiv.zig
index 3443e6ca09..6c7d90f948 100644
--- a/lib/compiler_rt/aulldiv.zig
+++ b/lib/compiler_rt/aulldiv.zig
@@ -9,8 +9,8 @@ pub const panic = common.panic;
comptime {
if (arch == .x86 and abi == .msvc) {
// Don't let LLVM apply the stdcall name mangling on those MSVC builtins
- @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = common.linkage });
- @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = common.linkage });
+ @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/aullrem.zig b/lib/compiler_rt/aullrem.zig
index 6960e1eceb..63a35e518f 100644
--- a/lib/compiler_rt/aullrem.zig
+++ b/lib/compiler_rt/aullrem.zig
@@ -9,8 +9,8 @@ pub const panic = common.panic;
comptime {
if (arch == .x86 and abi == .msvc) {
// Don't let LLVM apply the stdcall name mangling on those MSVC builtins
- @export(_allrem, .{ .name = "\x01__allrem", .linkage = common.linkage });
- @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = common.linkage });
+ @export(_allrem, .{ .name = "\x01__allrem", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/bcmp.zig b/lib/compiler_rt/bcmp.zig
index add6858ffe..bc8d43b48e 100644
--- a/lib/compiler_rt/bcmp.zig
+++ b/lib/compiler_rt/bcmp.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const common = @import("./common.zig");
comptime {
- @export(bcmp, .{ .name = "bcmp", .linkage = common.linkage });
+ @export(bcmp, .{ .name = "bcmp", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
diff --git a/lib/compiler_rt/bswap.zig b/lib/compiler_rt/bswap.zig
index 9f7d2cb879..ed330810f0 100644
--- a/lib/compiler_rt/bswap.zig
+++ b/lib/compiler_rt/bswap.zig
@@ -5,9 +5,9 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = common.linkage });
- @export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = common.linkage });
- @export(__bswapti2, .{ .name = "__bswapti2", .linkage = common.linkage });
+ @export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__bswapti2, .{ .name = "__bswapti2", .linkage = common.linkage, .visibility = common.visibility });
}
// bswap - byteswap
diff --git a/lib/compiler_rt/ceil.zig b/lib/compiler_rt/ceil.zig
index 6622546501..2765ed9f74 100644
--- a/lib/compiler_rt/ceil.zig
+++ b/lib/compiler_rt/ceil.zig
@@ -14,15 +14,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__ceilh, .{ .name = "__ceilh", .linkage = common.linkage });
- @export(ceilf, .{ .name = "ceilf", .linkage = common.linkage });
- @export(ceil, .{ .name = "ceil", .linkage = common.linkage });
- @export(__ceilx, .{ .name = "__ceilx", .linkage = common.linkage });
+ @export(__ceilh, .{ .name = "__ceilh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(ceilf, .{ .name = "ceilf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(ceil, .{ .name = "ceil", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ceilx, .{ .name = "__ceilx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(ceilq, .{ .name = "ceilf128", .linkage = common.linkage });
+ @export(ceilq, .{ .name = "ceilf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(ceilq, .{ .name = "ceilq", .linkage = common.linkage });
- @export(ceill, .{ .name = "ceill", .linkage = common.linkage });
+ @export(ceilq, .{ .name = "ceilq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(ceill, .{ .name = "ceill", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __ceilh(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/clzsi2_test.zig b/lib/compiler_rt/clzsi2_test.zig
index ef64e24fe1..fc0790ef71 100644
--- a/lib/compiler_rt/clzsi2_test.zig
+++ b/lib/compiler_rt/clzsi2_test.zig
@@ -3,24 +3,11 @@ const clz = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__clzsi2(a: u32, expected: i32) !void {
- // stage1 and stage2 diverge on function pointer semantics
- switch (builtin.zig_backend) {
- .stage1 => {
- // Use of `var` here is working around a stage1 bug.
- var nakedClzsi2 = clz.__clzsi2;
- var actualClzsi2 = @ptrCast(fn (a: i32) callconv(.C) i32, nakedClzsi2);
- var x = @bitCast(i32, a);
- var result = actualClzsi2(x);
- try testing.expectEqual(expected, result);
- },
- else => {
- const nakedClzsi2 = clz.__clzsi2;
- const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2);
- const x = @bitCast(i32, a);
- const result = actualClzsi2(x);
- try testing.expectEqual(expected, result);
- },
- }
+ const nakedClzsi2 = clz.__clzsi2;
+ const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2);
+ const x = @bitCast(i32, a);
+ const result = actualClzsi2(x);
+ try testing.expectEqual(expected, result);
}
test "clzsi2" {
@@ -281,7 +268,8 @@ test "clzsi2" {
try test__clzsi2(0xFE000000, 0);
try test__clzsi2(0xFF000000, 0);
// arm and thumb1 assume input a != 0
- //try test__clzsi2(0x00000000, 32);
+ if (!builtin.cpu.arch.isARM() and !builtin.cpu.arch.isThumb())
+ try test__clzsi2(0x00000000, 32);
try test__clzsi2(0x00000001, 31);
try test__clzsi2(0x00000002, 30);
try test__clzsi2(0x00000004, 29);
diff --git a/lib/compiler_rt/cmp.zig b/lib/compiler_rt/cmp.zig
index 8ff2c38cd4..cebc38c2ae 100644
--- a/lib/compiler_rt/cmp.zig
+++ b/lib/compiler_rt/cmp.zig
@@ -6,12 +6,12 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = common.linkage });
- @export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = common.linkage });
- @export(__cmpti2, .{ .name = "__cmpti2", .linkage = common.linkage });
- @export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = common.linkage });
- @export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = common.linkage });
- @export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = common.linkage });
+ @export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cmpti2, .{ .name = "__cmpti2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = common.linkage, .visibility = common.visibility });
}
// cmp - signed compare
diff --git a/lib/compiler_rt/cmpdf2.zig b/lib/compiler_rt/cmpdf2.zig
index 67dbcd8b4d..8a7b37c2c9 100644
--- a/lib/compiler_rt/cmpdf2.zig
+++ b/lib/compiler_rt/cmpdf2.zig
@@ -7,15 +7,15 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = common.linkage });
- @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = common.linkage });
- @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = common.linkage });
+ @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__eqdf2, .{ .name = "__eqdf2", .linkage = common.linkage });
- @export(__nedf2, .{ .name = "__nedf2", .linkage = common.linkage });
- @export(__ledf2, .{ .name = "__ledf2", .linkage = common.linkage });
- @export(__cmpdf2, .{ .name = "__cmpdf2", .linkage = common.linkage });
- @export(__ltdf2, .{ .name = "__ltdf2", .linkage = common.linkage });
+ @export(__eqdf2, .{ .name = "__eqdf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__nedf2, .{ .name = "__nedf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ledf2, .{ .name = "__ledf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cmpdf2, .{ .name = "__cmpdf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ltdf2, .{ .name = "__ltdf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/cmphf2.zig b/lib/compiler_rt/cmphf2.zig
index d5ee3f1daa..d801b30ff6 100644
--- a/lib/compiler_rt/cmphf2.zig
+++ b/lib/compiler_rt/cmphf2.zig
@@ -6,11 +6,11 @@ const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
- @export(__eqhf2, .{ .name = "__eqhf2", .linkage = common.linkage });
- @export(__nehf2, .{ .name = "__nehf2", .linkage = common.linkage });
- @export(__lehf2, .{ .name = "__lehf2", .linkage = common.linkage });
- @export(__cmphf2, .{ .name = "__cmphf2", .linkage = common.linkage });
- @export(__lthf2, .{ .name = "__lthf2", .linkage = common.linkage });
+ @export(__eqhf2, .{ .name = "__eqhf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__nehf2, .{ .name = "__nehf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lehf2, .{ .name = "__lehf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cmphf2, .{ .name = "__cmphf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lthf2, .{ .name = "__lthf2", .linkage = common.linkage, .visibility = common.visibility });
}
/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
diff --git a/lib/compiler_rt/cmpsf2.zig b/lib/compiler_rt/cmpsf2.zig
index 1ac40ef6e2..35e250e810 100644
--- a/lib/compiler_rt/cmpsf2.zig
+++ b/lib/compiler_rt/cmpsf2.zig
@@ -7,15 +7,15 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = common.linkage });
- @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = common.linkage });
- @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = common.linkage });
+ @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__eqsf2, .{ .name = "__eqsf2", .linkage = common.linkage });
- @export(__nesf2, .{ .name = "__nesf2", .linkage = common.linkage });
- @export(__lesf2, .{ .name = "__lesf2", .linkage = common.linkage });
- @export(__cmpsf2, .{ .name = "__cmpsf2", .linkage = common.linkage });
- @export(__ltsf2, .{ .name = "__ltsf2", .linkage = common.linkage });
+ @export(__eqsf2, .{ .name = "__eqsf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__nesf2, .{ .name = "__nesf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lesf2, .{ .name = "__lesf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cmpsf2, .{ .name = "__cmpsf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ltsf2, .{ .name = "__ltsf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/cmptf2.zig b/lib/compiler_rt/cmptf2.zig
index 98137e3dfc..bc53afc625 100644
--- a/lib/compiler_rt/cmptf2.zig
+++ b/lib/compiler_rt/cmptf2.zig
@@ -7,24 +7,24 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__eqtf2, .{ .name = "__eqkf2", .linkage = common.linkage });
- @export(__netf2, .{ .name = "__nekf2", .linkage = common.linkage });
- @export(__lttf2, .{ .name = "__ltkf2", .linkage = common.linkage });
- @export(__letf2, .{ .name = "__lekf2", .linkage = common.linkage });
+ @export(__eqtf2, .{ .name = "__eqkf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__netf2, .{ .name = "__nekf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lttf2, .{ .name = "__ltkf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__letf2, .{ .name = "__lekf2", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = common.linkage });
- @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = common.linkage });
- @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = common.linkage });
- @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = common.linkage });
- @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = common.linkage });
- @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = common.linkage });
- @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = common.linkage });
+ @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = common.linkage, .visibility = common.visibility });
+ @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__eqtf2, .{ .name = "__eqtf2", .linkage = common.linkage });
- @export(__netf2, .{ .name = "__netf2", .linkage = common.linkage });
- @export(__letf2, .{ .name = "__letf2", .linkage = common.linkage });
- @export(__cmptf2, .{ .name = "__cmptf2", .linkage = common.linkage });
- @export(__lttf2, .{ .name = "__lttf2", .linkage = common.linkage });
+ @export(__eqtf2, .{ .name = "__eqtf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__netf2, .{ .name = "__netf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__letf2, .{ .name = "__letf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cmptf2, .{ .name = "__cmptf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lttf2, .{ .name = "__lttf2", .linkage = common.linkage, .visibility = common.visibility });
}
/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
diff --git a/lib/compiler_rt/cmpxf2.zig b/lib/compiler_rt/cmpxf2.zig
index 7286316f99..75355775bb 100644
--- a/lib/compiler_rt/cmpxf2.zig
+++ b/lib/compiler_rt/cmpxf2.zig
@@ -6,11 +6,11 @@ const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
- @export(__eqxf2, .{ .name = "__eqxf2", .linkage = common.linkage });
- @export(__nexf2, .{ .name = "__nexf2", .linkage = common.linkage });
- @export(__lexf2, .{ .name = "__lexf2", .linkage = common.linkage });
- @export(__cmpxf2, .{ .name = "__cmpxf2", .linkage = common.linkage });
- @export(__ltxf2, .{ .name = "__ltxf2", .linkage = common.linkage });
+ @export(__eqxf2, .{ .name = "__eqxf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__nexf2, .{ .name = "__nexf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lexf2, .{ .name = "__lexf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cmpxf2, .{ .name = "__cmpxf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ltxf2, .{ .name = "__ltxf2", .linkage = common.linkage, .visibility = common.visibility });
}
/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig
index f16ecf0e18..40a770070d 100644
--- a/lib/compiler_rt/common.zig
+++ b/lib/compiler_rt/common.zig
@@ -2,6 +2,10 @@ const std = @import("std");
const builtin = @import("builtin");
pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+/// Determines the symbol's visibility to other objects.
+/// For WebAssembly this allows the symbol to be resolved to other modules, but will not
+/// export it to the host runtime.
+pub const visibility: std.builtin.SymbolVisibility = if (builtin.target.isWasm()) .hidden else .default;
pub const want_aeabi = switch (builtin.abi) {
.eabi,
.eabihf,
diff --git a/lib/compiler_rt/cos.zig b/lib/compiler_rt/cos.zig
index 664f6550a4..029b6c346a 100644
--- a/lib/compiler_rt/cos.zig
+++ b/lib/compiler_rt/cos.zig
@@ -12,15 +12,15 @@ const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
comptime {
- @export(__cosh, .{ .name = "__cosh", .linkage = common.linkage });
- @export(cosf, .{ .name = "cosf", .linkage = common.linkage });
- @export(cos, .{ .name = "cos", .linkage = common.linkage });
- @export(__cosx, .{ .name = "__cosx", .linkage = common.linkage });
+ @export(__cosh, .{ .name = "__cosh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(cosf, .{ .name = "cosf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(cos, .{ .name = "cos", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__cosx, .{ .name = "__cosx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(cosq, .{ .name = "cosf128", .linkage = common.linkage });
+ @export(cosq, .{ .name = "cosf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(cosq, .{ .name = "cosq", .linkage = common.linkage });
- @export(cosl, .{ .name = "cosl", .linkage = common.linkage });
+ @export(cosq, .{ .name = "cosq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(cosl, .{ .name = "cosl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __cosh(a: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/count0bits.zig b/lib/compiler_rt/count0bits.zig
index d763e5c8a3..e1500d0102 100644
--- a/lib/compiler_rt/count0bits.zig
+++ b/lib/compiler_rt/count0bits.zig
@@ -6,15 +6,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__clzsi2, .{ .name = "__clzsi2", .linkage = common.linkage });
- @export(__clzdi2, .{ .name = "__clzdi2", .linkage = common.linkage });
- @export(__clzti2, .{ .name = "__clzti2", .linkage = common.linkage });
- @export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = common.linkage });
- @export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = common.linkage });
- @export(__ctzti2, .{ .name = "__ctzti2", .linkage = common.linkage });
- @export(__ffssi2, .{ .name = "__ffssi2", .linkage = common.linkage });
- @export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = common.linkage });
- @export(__ffsti2, .{ .name = "__ffsti2", .linkage = common.linkage });
+ @export(__clzsi2, .{ .name = "__clzsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__clzdi2, .{ .name = "__clzdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__clzti2, .{ .name = "__clzti2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ctzti2, .{ .name = "__ctzti2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ffssi2, .{ .name = "__ffssi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ffsti2, .{ .name = "__ffsti2", .linkage = common.linkage, .visibility = common.visibility });
}
// clz - count leading zeroes
diff --git a/lib/compiler_rt/divdc3.zig b/lib/compiler_rt/divdc3.zig
index 8aad682e61..ddcb0b83ea 100644
--- a/lib/compiler_rt/divdc3.zig
+++ b/lib/compiler_rt/divdc3.zig
@@ -4,7 +4,7 @@ const Complex = @import("./mulc3.zig").Complex;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__divdc3, .{ .name = "__divdc3", .linkage = common.linkage });
+ @export(__divdc3, .{ .name = "__divdc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/divdf3.zig b/lib/compiler_rt/divdf3.zig
index dd22f4836c..2f83d312dc 100644
--- a/lib/compiler_rt/divdf3.zig
+++ b/lib/compiler_rt/divdf3.zig
@@ -15,9 +15,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = common.linkage });
+ @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__divdf3, .{ .name = "__divdf3", .linkage = common.linkage });
+ @export(__divdf3, .{ .name = "__divdf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/divhc3.zig b/lib/compiler_rt/divhc3.zig
index 3de8967593..7e9bfeb0ff 100644
--- a/lib/compiler_rt/divhc3.zig
+++ b/lib/compiler_rt/divhc3.zig
@@ -4,7 +4,7 @@ const Complex = @import("./mulc3.zig").Complex;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__divhc3, .{ .name = "__divhc3", .linkage = common.linkage });
+ @export(__divhc3, .{ .name = "__divhc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/divhf3.zig b/lib/compiler_rt/divhf3.zig
index ad73a5ddb2..6bb607bef9 100644
--- a/lib/compiler_rt/divhf3.zig
+++ b/lib/compiler_rt/divhf3.zig
@@ -2,7 +2,7 @@ const common = @import("common.zig");
const divsf3 = @import("./divsf3.zig");
comptime {
- @export(__divhf3, .{ .name = "__divhf3", .linkage = common.linkage });
+ @export(__divhf3, .{ .name = "__divhf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __divhf3(a: f16, b: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/divsc3.zig b/lib/compiler_rt/divsc3.zig
index c9e0c0cbf5..20cdc56a09 100644
--- a/lib/compiler_rt/divsc3.zig
+++ b/lib/compiler_rt/divsc3.zig
@@ -4,7 +4,7 @@ const Complex = @import("./mulc3.zig").Complex;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__divsc3, .{ .name = "__divsc3", .linkage = common.linkage });
+ @export(__divsc3, .{ .name = "__divsc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/divsf3.zig b/lib/compiler_rt/divsf3.zig
index 13565f9b64..5f05141610 100644
--- a/lib/compiler_rt/divsf3.zig
+++ b/lib/compiler_rt/divsf3.zig
@@ -13,9 +13,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = common.linkage });
+ @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__divsf3, .{ .name = "__divsf3", .linkage = common.linkage });
+ @export(__divsf3, .{ .name = "__divsf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/divtc3.zig b/lib/compiler_rt/divtc3.zig
index e2382a1372..d5b1d12059 100644
--- a/lib/compiler_rt/divtc3.zig
+++ b/lib/compiler_rt/divtc3.zig
@@ -4,7 +4,7 @@ const Complex = @import("./mulc3.zig").Complex;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__divtc3, .{ .name = "__divtc3", .linkage = common.linkage });
+ @export(__divtc3, .{ .name = "__divtc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/divtf3.zig b/lib/compiler_rt/divtf3.zig
index 736068e819..511a9582a3 100644
--- a/lib/compiler_rt/divtf3.zig
+++ b/lib/compiler_rt/divtf3.zig
@@ -10,11 +10,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
// TODO: why did this not error?
- @export(__divtf3, .{ .name = "__divkf3", .linkage = common.linkage });
+ @export(__divtf3, .{ .name = "__divkf3", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_div, .{ .name = "_Qp_div", .linkage = common.linkage });
+ @export(_Qp_div, .{ .name = "_Qp_div", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__divtf3, .{ .name = "__divtf3", .linkage = common.linkage });
+ @export(__divtf3, .{ .name = "__divtf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
diff --git a/lib/compiler_rt/divti3.zig b/lib/compiler_rt/divti3.zig
index 6899cd323f..31302aab4d 100644
--- a/lib/compiler_rt/divti3.zig
+++ b/lib/compiler_rt/divti3.zig
@@ -7,23 +7,10 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- if (builtin.os.tag == .windows) {
- switch (arch) {
- .x86 => {
- @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
- },
- .x86_64 => {
- // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
- // that LLVM expects compiler-rt to have.
- @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage });
- },
- else => {},
- }
- if (arch.isAARCH64()) {
- @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
- }
+ if (common.want_windows_v2u64_abi) {
+ @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
+ @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility });
}
}
@@ -31,7 +18,7 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
return div(a, b);
}
-const v128 = @import("std").meta.Vector(2, u64);
+const v128 = @Vector(2, u64);
fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b)));
diff --git a/lib/compiler_rt/divxc3.zig b/lib/compiler_rt/divxc3.zig
index c6ed5a210c..177e26cfd8 100644
--- a/lib/compiler_rt/divxc3.zig
+++ b/lib/compiler_rt/divxc3.zig
@@ -4,7 +4,7 @@ const Complex = @import("./mulc3.zig").Complex;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__divxc3, .{ .name = "__divxc3", .linkage = common.linkage });
+ @export(__divxc3, .{ .name = "__divxc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/divxf3.zig b/lib/compiler_rt/divxf3.zig
index 2282c21299..18fd20b726 100644
--- a/lib/compiler_rt/divxf3.zig
+++ b/lib/compiler_rt/divxf3.zig
@@ -9,7 +9,7 @@ const wideMultiply = common.wideMultiply;
pub const panic = common.panic;
comptime {
- @export(__divxf3, .{ .name = "__divxf3", .linkage = common.linkage });
+ @export(__divxf3, .{ .name = "__divxf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig
index c2a95f089f..4b37e6dcfc 100644
--- a/lib/compiler_rt/emutls.zig
+++ b/lib/compiler_rt/emutls.zig
@@ -19,7 +19,7 @@ pub const panic = common.panic;
comptime {
if (builtin.link_libc and (builtin.abi == .android or builtin.os.tag == .openbsd)) {
- @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage });
+ @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage, .visibility = common.visibility });
}
}
@@ -86,12 +86,10 @@ const ObjectArray = struct {
/// create a new ObjectArray with n slots. must call deinit() to deallocate.
pub fn init(n: usize) *ObjectArray {
var array = simple_allocator.alloc(ObjectArray);
- errdefer simple_allocator.free(array);
array.* = ObjectArray{
.slots = simple_allocator.allocSlice(?ObjectPointer, n),
};
- errdefer simple_allocator.free(array.slots);
for (array.slots) |*object| {
object.* = null;
diff --git a/lib/compiler_rt/exp.zig b/lib/compiler_rt/exp.zig
index 7dfac7328c..24d29ad0bb 100644
--- a/lib/compiler_rt/exp.zig
+++ b/lib/compiler_rt/exp.zig
@@ -14,15 +14,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__exph, .{ .name = "__exph", .linkage = common.linkage });
- @export(expf, .{ .name = "expf", .linkage = common.linkage });
- @export(exp, .{ .name = "exp", .linkage = common.linkage });
- @export(__expx, .{ .name = "__expx", .linkage = common.linkage });
+ @export(__exph, .{ .name = "__exph", .linkage = common.linkage, .visibility = common.visibility });
+ @export(expf, .{ .name = "expf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(exp, .{ .name = "exp", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__expx, .{ .name = "__expx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(expq, .{ .name = "expf128", .linkage = common.linkage });
+ @export(expq, .{ .name = "expf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(expq, .{ .name = "expq", .linkage = common.linkage });
- @export(expl, .{ .name = "expl", .linkage = common.linkage });
+ @export(expq, .{ .name = "expq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(expl, .{ .name = "expl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __exph(a: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/exp2.zig b/lib/compiler_rt/exp2.zig
index daca339bae..1882367522 100644
--- a/lib/compiler_rt/exp2.zig
+++ b/lib/compiler_rt/exp2.zig
@@ -14,15 +14,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__exp2h, .{ .name = "__exp2h", .linkage = common.linkage });
- @export(exp2f, .{ .name = "exp2f", .linkage = common.linkage });
- @export(exp2, .{ .name = "exp2", .linkage = common.linkage });
- @export(__exp2x, .{ .name = "__exp2x", .linkage = common.linkage });
+ @export(__exp2h, .{ .name = "__exp2h", .linkage = common.linkage, .visibility = common.visibility });
+ @export(exp2f, .{ .name = "exp2f", .linkage = common.linkage, .visibility = common.visibility });
+ @export(exp2, .{ .name = "exp2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__exp2x, .{ .name = "__exp2x", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(exp2q, .{ .name = "exp2f128", .linkage = common.linkage });
+ @export(exp2q, .{ .name = "exp2f128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(exp2q, .{ .name = "exp2q", .linkage = common.linkage });
- @export(exp2l, .{ .name = "exp2l", .linkage = common.linkage });
+ @export(exp2q, .{ .name = "exp2q", .linkage = common.linkage, .visibility = common.visibility });
+ @export(exp2l, .{ .name = "exp2l", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __exp2h(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/extenddftf2.zig b/lib/compiler_rt/extenddftf2.zig
index 3005987530..e7b2d8ed70 100644
--- a/lib/compiler_rt/extenddftf2.zig
+++ b/lib/compiler_rt/extenddftf2.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = common.linkage });
+ @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = common.linkage });
+ @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage });
+ @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extenddftf2(a: f64) callconv(.C) f128 {
diff --git a/lib/compiler_rt/extenddfxf2.zig b/lib/compiler_rt/extenddfxf2.zig
index 380a7de4a4..c9e10d57ec 100644
--- a/lib/compiler_rt/extenddfxf2.zig
+++ b/lib/compiler_rt/extenddfxf2.zig
@@ -4,7 +4,7 @@ const extend_f80 = @import("./extendf.zig").extend_f80;
pub const panic = common.panic;
comptime {
- @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage });
+ @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extenddfxf2(a: f64) callconv(.C) f80 {
diff --git a/lib/compiler_rt/extendhfdf2.zig b/lib/compiler_rt/extendhfdf2.zig
index f7a94f58ef..97311b104c 100644
--- a/lib/compiler_rt/extendhfdf2.zig
+++ b/lib/compiler_rt/extendhfdf2.zig
@@ -4,7 +4,7 @@ const extendf = @import("./extendf.zig").extendf;
pub const panic = common.panic;
comptime {
- @export(__extendhfdf2, .{ .name = "__extendhfdf2", .linkage = common.linkage });
+ @export(__extendhfdf2, .{ .name = "__extendhfdf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendhfdf2(a: common.F16T) callconv(.C) f64 {
diff --git a/lib/compiler_rt/extendhfsf2.zig b/lib/compiler_rt/extendhfsf2.zig
index f019b42cb6..41d9282e8a 100644
--- a/lib/compiler_rt/extendhfsf2.zig
+++ b/lib/compiler_rt/extendhfsf2.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.gnu_f16_abi) {
- @export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage });
+ @export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_aeabi) {
- @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = common.linkage });
+ @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage });
+ @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendhfsf2(a: common.F16T) callconv(.C) f32 {
diff --git a/lib/compiler_rt/extendhftf2.zig b/lib/compiler_rt/extendhftf2.zig
index 5d339fabce..2d6307af35 100644
--- a/lib/compiler_rt/extendhftf2.zig
+++ b/lib/compiler_rt/extendhftf2.zig
@@ -4,7 +4,7 @@ const extendf = @import("./extendf.zig").extendf;
pub const panic = common.panic;
comptime {
- @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage });
+ @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendhftf2(a: common.F16T) callconv(.C) f128 {
diff --git a/lib/compiler_rt/extendhfxf2.zig b/lib/compiler_rt/extendhfxf2.zig
index e509f96575..1f9c43328f 100644
--- a/lib/compiler_rt/extendhfxf2.zig
+++ b/lib/compiler_rt/extendhfxf2.zig
@@ -4,7 +4,7 @@ const extend_f80 = @import("./extendf.zig").extend_f80;
pub const panic = common.panic;
comptime {
- @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage });
+ @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __extendhfxf2(a: common.F16T) callconv(.C) f80 {
diff --git a/lib/compiler_rt/extendsfdf2.zig b/lib/compiler_rt/extendsfdf2.zig
index 7fd69f6c22..0a7ba8df53 100644
--- a/lib/compiler_rt/extendsfdf2.zig
+++ b/lib/compiler_rt/extendsfdf2.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = common.linkage });
+ @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = common.linkage });
+ @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/extendsftf2.zig b/lib/compiler_rt/extendsftf2.zig
index 68b0747042..a74319745a 100644
--- a/lib/compiler_rt/extendsftf2.zig
+++ b/lib/compiler_rt/extendsftf2.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = common.linkage });
+ @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = common.linkage });
+ @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage });
+ @export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendsftf2(a: f32) callconv(.C) f128 {
diff --git a/lib/compiler_rt/extendsfxf2.zig b/lib/compiler_rt/extendsfxf2.zig
index 41bb5ace85..938e65c1bd 100644
--- a/lib/compiler_rt/extendsfxf2.zig
+++ b/lib/compiler_rt/extendsfxf2.zig
@@ -4,7 +4,7 @@ const extend_f80 = @import("./extendf.zig").extend_f80;
pub const panic = common.panic;
comptime {
- @export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage });
+ @export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __extendsfxf2(a: f32) callconv(.C) f80 {
diff --git a/lib/compiler_rt/extendxftf2.zig b/lib/compiler_rt/extendxftf2.zig
index 077d510646..c3243d3018 100644
--- a/lib/compiler_rt/extendxftf2.zig
+++ b/lib/compiler_rt/extendxftf2.zig
@@ -4,7 +4,7 @@ const common = @import("./common.zig");
pub const panic = common.panic;
comptime {
- @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage });
+ @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __extendxftf2(a: f80) callconv(.C) f128 {
diff --git a/lib/compiler_rt/fabs.zig b/lib/compiler_rt/fabs.zig
index e9ce98bb7a..b38e15e593 100644
--- a/lib/compiler_rt/fabs.zig
+++ b/lib/compiler_rt/fabs.zig
@@ -6,15 +6,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__fabsh, .{ .name = "__fabsh", .linkage = common.linkage });
- @export(fabsf, .{ .name = "fabsf", .linkage = common.linkage });
- @export(fabs, .{ .name = "fabs", .linkage = common.linkage });
- @export(__fabsx, .{ .name = "__fabsx", .linkage = common.linkage });
+ @export(__fabsh, .{ .name = "__fabsh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fabsf, .{ .name = "fabsf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fabs, .{ .name = "fabs", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__fabsx, .{ .name = "__fabsx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(fabsq, .{ .name = "fabsf128", .linkage = common.linkage });
+ @export(fabsq, .{ .name = "fabsf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(fabsq, .{ .name = "fabsq", .linkage = common.linkage });
- @export(fabsl, .{ .name = "fabsl", .linkage = common.linkage });
+ @export(fabsq, .{ .name = "fabsq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fabsl, .{ .name = "fabsl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fabsh(a: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/fixdfdi.zig b/lib/compiler_rt/fixdfdi.zig
index 5935f23524..0329b3cc13 100644
--- a/lib/compiler_rt/fixdfdi.zig
+++ b/lib/compiler_rt/fixdfdi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = common.linkage });
+ @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = common.linkage });
+ @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixdfsi.zig b/lib/compiler_rt/fixdfsi.zig
index 983c84ccb1..74406171b4 100644
--- a/lib/compiler_rt/fixdfsi.zig
+++ b/lib/compiler_rt/fixdfsi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = common.linkage });
+ @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = common.linkage });
+ @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixdfti.zig b/lib/compiler_rt/fixdfti.zig
index 532b271072..ecb4e8912c 100644
--- a/lib/compiler_rt/fixdfti.zig
+++ b/lib/compiler_rt/fixdfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixdfti_windows_x86_64, .{ .name = "__fixdfti", .linkage = common.linkage });
+ @export(__fixdfti_windows_x86_64, .{ .name = "__fixdfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage });
+ @export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixhfdi.zig b/lib/compiler_rt/fixhfdi.zig
index 28e871f495..3cb1186d71 100644
--- a/lib/compiler_rt/fixhfdi.zig
+++ b/lib/compiler_rt/fixhfdi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage });
+ @export(__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixhfdi(a: f16) callconv(.C) i64 {
diff --git a/lib/compiler_rt/fixhfsi.zig b/lib/compiler_rt/fixhfsi.zig
index 23440eea22..2f24649fb7 100644
--- a/lib/compiler_rt/fixhfsi.zig
+++ b/lib/compiler_rt/fixhfsi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage });
+ @export(__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixhfsi(a: f16) callconv(.C) i32 {
diff --git a/lib/compiler_rt/fixhfti.zig b/lib/compiler_rt/fixhfti.zig
index b6774968dd..2865bcad29 100644
--- a/lib/compiler_rt/fixhfti.zig
+++ b/lib/compiler_rt/fixhfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixhfti_windows_x86_64, .{ .name = "__fixhfti", .linkage = common.linkage });
+ @export(__fixhfti_windows_x86_64, .{ .name = "__fixhfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage });
+ @export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixsfdi.zig b/lib/compiler_rt/fixsfdi.zig
index 0c4fb7f3f6..4bb63e6768 100644
--- a/lib/compiler_rt/fixsfdi.zig
+++ b/lib/compiler_rt/fixsfdi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = common.linkage });
+ @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = common.linkage });
+ @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixsfsi.zig b/lib/compiler_rt/fixsfsi.zig
index f48e354cd2..55eb6e2276 100644
--- a/lib/compiler_rt/fixsfsi.zig
+++ b/lib/compiler_rt/fixsfsi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = common.linkage });
+ @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = common.linkage });
+ @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixsfti.zig b/lib/compiler_rt/fixsfti.zig
index 5aa4068b62..cd5e524b20 100644
--- a/lib/compiler_rt/fixsfti.zig
+++ b/lib/compiler_rt/fixsfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixsfti_windows_x86_64, .{ .name = "__fixsfti", .linkage = common.linkage });
+ @export(__fixsfti_windows_x86_64, .{ .name = "__fixsfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage });
+ @export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixtfdi.zig b/lib/compiler_rt/fixtfdi.zig
index f9a32828f1..bafcf6d83a 100644
--- a/lib/compiler_rt/fixtfdi.zig
+++ b/lib/compiler_rt/fixtfdi.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = common.linkage });
+ @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = common.linkage });
+ @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage });
+ @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixtfdi(a: f128) callconv(.C) i64 {
diff --git a/lib/compiler_rt/fixtfsi.zig b/lib/compiler_rt/fixtfsi.zig
index 0f5e3924ae..389f2b117f 100644
--- a/lib/compiler_rt/fixtfsi.zig
+++ b/lib/compiler_rt/fixtfsi.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = common.linkage });
+ @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = common.linkage });
+ @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage });
+ @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixtfsi(a: f128) callconv(.C) i32 {
diff --git a/lib/compiler_rt/fixtfti.zig b/lib/compiler_rt/fixtfti.zig
index ba46eb8598..03d861f92e 100644
--- a/lib/compiler_rt/fixtfti.zig
+++ b/lib/compiler_rt/fixtfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixtfti_windows_x86_64, .{ .name = "__fixtfti", .linkage = common.linkage });
+ @export(__fixtfti_windows_x86_64, .{ .name = "__fixtfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage });
+ @export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunsdfdi.zig b/lib/compiler_rt/fixunsdfdi.zig
index edc0806405..b544595c7f 100644
--- a/lib/compiler_rt/fixunsdfdi.zig
+++ b/lib/compiler_rt/fixunsdfdi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = common.linkage });
+ @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = common.linkage });
+ @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunsdfsi.zig b/lib/compiler_rt/fixunsdfsi.zig
index cc413f3983..87affbce32 100644
--- a/lib/compiler_rt/fixunsdfsi.zig
+++ b/lib/compiler_rt/fixunsdfsi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = common.linkage });
+ @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = common.linkage });
+ @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunsdfti.zig b/lib/compiler_rt/fixunsdfti.zig
index 00a89ba2d6..242d84176b 100644
--- a/lib/compiler_rt/fixunsdfti.zig
+++ b/lib/compiler_rt/fixunsdfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixunsdfti_windows_x86_64, .{ .name = "__fixunsdfti", .linkage = common.linkage });
+ @export(__fixunsdfti_windows_x86_64, .{ .name = "__fixunsdfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage });
+ @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunshfdi.zig b/lib/compiler_rt/fixunshfdi.zig
index 5058bc5e68..9c70df3d5e 100644
--- a/lib/compiler_rt/fixunshfdi.zig
+++ b/lib/compiler_rt/fixunshfdi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage });
+ @export(__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunshfdi(a: f16) callconv(.C) u64 {
diff --git a/lib/compiler_rt/fixunshfsi.zig b/lib/compiler_rt/fixunshfsi.zig
index 5755048814..e5070f7a1b 100644
--- a/lib/compiler_rt/fixunshfsi.zig
+++ b/lib/compiler_rt/fixunshfsi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage });
+ @export(__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunshfsi(a: f16) callconv(.C) u32 {
diff --git a/lib/compiler_rt/fixunshfti.zig b/lib/compiler_rt/fixunshfti.zig
index 4f5179bfb5..0c67d4998a 100644
--- a/lib/compiler_rt/fixunshfti.zig
+++ b/lib/compiler_rt/fixunshfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixunshfti_windows_x86_64, .{ .name = "__fixunshfti", .linkage = common.linkage });
+ @export(__fixunshfti_windows_x86_64, .{ .name = "__fixunshfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage });
+ @export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
@@ -16,7 +16,7 @@ pub fn __fixunshfti(a: f16) callconv(.C) u128 {
return floatToInt(u128, a);
}
-const v2u64 = @import("std").meta.Vector(2, u64);
+const v2u64 = @Vector(2, u64);
fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(u128, a));
diff --git a/lib/compiler_rt/fixunssfdi.zig b/lib/compiler_rt/fixunssfdi.zig
index 544dfcd97e..dd883693a7 100644
--- a/lib/compiler_rt/fixunssfdi.zig
+++ b/lib/compiler_rt/fixunssfdi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = common.linkage });
+ @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = common.linkage });
+ @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunssfsi.zig b/lib/compiler_rt/fixunssfsi.zig
index 24b1e86694..a071e674ae 100644
--- a/lib/compiler_rt/fixunssfsi.zig
+++ b/lib/compiler_rt/fixunssfsi.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = common.linkage });
+ @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = common.linkage });
+ @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunssfti.zig b/lib/compiler_rt/fixunssfti.zig
index 0d3fa5d3b9..ae2a27ab8e 100644
--- a/lib/compiler_rt/fixunssfti.zig
+++ b/lib/compiler_rt/fixunssfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixunssfti_windows_x86_64, .{ .name = "__fixunssfti", .linkage = common.linkage });
+ @export(__fixunssfti_windows_x86_64, .{ .name = "__fixunssfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage });
+ @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunstfdi.zig b/lib/compiler_rt/fixunstfdi.zig
index fb9ff5b330..710207b330 100644
--- a/lib/compiler_rt/fixunstfdi.zig
+++ b/lib/compiler_rt/fixunstfdi.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = common.linkage });
+ @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = common.linkage });
+ @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage });
+ @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
diff --git a/lib/compiler_rt/fixunstfsi.zig b/lib/compiler_rt/fixunstfsi.zig
index 79cacbe340..1b0b64c193 100644
--- a/lib/compiler_rt/fixunstfsi.zig
+++ b/lib/compiler_rt/fixunstfsi.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = common.linkage });
+ @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = common.linkage });
+ @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage });
+ @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
diff --git a/lib/compiler_rt/fixunstfti.zig b/lib/compiler_rt/fixunstfti.zig
index 02cabd4d46..72b529e0c9 100644
--- a/lib/compiler_rt/fixunstfti.zig
+++ b/lib/compiler_rt/fixunstfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixunstfti_windows_x86_64, .{ .name = "__fixunstfti", .linkage = common.linkage });
+ @export(__fixunstfti_windows_x86_64, .{ .name = "__fixunstfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage });
+ @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixunsxfdi.zig b/lib/compiler_rt/fixunsxfdi.zig
index cb2760af4e..de1cd13806 100644
--- a/lib/compiler_rt/fixunsxfdi.zig
+++ b/lib/compiler_rt/fixunsxfdi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage });
+ @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunsxfdi(a: f80) callconv(.C) u64 {
diff --git a/lib/compiler_rt/fixunsxfsi.zig b/lib/compiler_rt/fixunsxfsi.zig
index bec36abbf4..93cdcb2bab 100644
--- a/lib/compiler_rt/fixunsxfsi.zig
+++ b/lib/compiler_rt/fixunsxfsi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage });
+ @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunsxfsi(a: f80) callconv(.C) u32 {
diff --git a/lib/compiler_rt/fixunsxfti.zig b/lib/compiler_rt/fixunsxfti.zig
index 1cf5891f92..a34bd288c0 100644
--- a/lib/compiler_rt/fixunsxfti.zig
+++ b/lib/compiler_rt/fixunsxfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixunsxfti_windows_x86_64, .{ .name = "__fixunsxfti", .linkage = common.linkage });
+ @export(__fixunsxfti_windows_x86_64, .{ .name = "__fixunsxfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage });
+ @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/fixxfdi.zig b/lib/compiler_rt/fixxfdi.zig
index 0f249e0a92..096e381629 100644
--- a/lib/compiler_rt/fixxfdi.zig
+++ b/lib/compiler_rt/fixxfdi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage });
+ @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixxfdi(a: f80) callconv(.C) i64 {
diff --git a/lib/compiler_rt/fixxfsi.zig b/lib/compiler_rt/fixxfsi.zig
index ac2158b7b8..b6714d2066 100644
--- a/lib/compiler_rt/fixxfsi.zig
+++ b/lib/compiler_rt/fixxfsi.zig
@@ -4,7 +4,7 @@ const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
- @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage });
+ @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixxfsi(a: f80) callconv(.C) i32 {
diff --git a/lib/compiler_rt/fixxfti.zig b/lib/compiler_rt/fixxfti.zig
index 9a40ec3d6a..2e9dbdc2fb 100644
--- a/lib/compiler_rt/fixxfti.zig
+++ b/lib/compiler_rt/fixxfti.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__fixxfti_windows_x86_64, .{ .name = "__fixxfti", .linkage = common.linkage });
+ @export(__fixxfti_windows_x86_64, .{ .name = "__fixxfti", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage });
+ @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatdidf.zig b/lib/compiler_rt/floatdidf.zig
index 9117e2189d..9b9df4ae70 100644
--- a/lib/compiler_rt/floatdidf.zig
+++ b/lib/compiler_rt/floatdidf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = common.linkage });
+ @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatdidf, .{ .name = "__floatdidf", .linkage = common.linkage });
+ @export(__floatdidf, .{ .name = "__floatdidf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatdihf.zig b/lib/compiler_rt/floatdihf.zig
index f2f7236d6f..1db7a0eac6 100644
--- a/lib/compiler_rt/floatdihf.zig
+++ b/lib/compiler_rt/floatdihf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage });
+ @export(__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatdihf(a: i64) callconv(.C) f16 {
diff --git a/lib/compiler_rt/floatdisf.zig b/lib/compiler_rt/floatdisf.zig
index 3de94c5103..3bdcc60f20 100644
--- a/lib/compiler_rt/floatdisf.zig
+++ b/lib/compiler_rt/floatdisf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = common.linkage });
+ @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatdisf, .{ .name = "__floatdisf", .linkage = common.linkage });
+ @export(__floatdisf, .{ .name = "__floatdisf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatditf.zig b/lib/compiler_rt/floatditf.zig
index 1f651b817b..173dd79f75 100644
--- a/lib/compiler_rt/floatditf.zig
+++ b/lib/compiler_rt/floatditf.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__floatditf, .{ .name = "__floatdikf", .linkage = common.linkage });
+ @export(__floatditf, .{ .name = "__floatdikf", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = common.linkage });
+ @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__floatditf, .{ .name = "__floatditf", .linkage = common.linkage });
+ @export(__floatditf, .{ .name = "__floatditf", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floatditf(a: i64) callconv(.C) f128 {
diff --git a/lib/compiler_rt/floatdixf.zig b/lib/compiler_rt/floatdixf.zig
index 7d80fdbeb8..c4fc9ca28f 100644
--- a/lib/compiler_rt/floatdixf.zig
+++ b/lib/compiler_rt/floatdixf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage });
+ @export(__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatdixf(a: i64) callconv(.C) f80 {
diff --git a/lib/compiler_rt/floatsidf.zig b/lib/compiler_rt/floatsidf.zig
index e31c2616fd..7ec7d90fba 100644
--- a/lib/compiler_rt/floatsidf.zig
+++ b/lib/compiler_rt/floatsidf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = common.linkage });
+ @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatsidf, .{ .name = "__floatsidf", .linkage = common.linkage });
+ @export(__floatsidf, .{ .name = "__floatsidf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatsihf.zig b/lib/compiler_rt/floatsihf.zig
index 84b54298b5..0a08c19847 100644
--- a/lib/compiler_rt/floatsihf.zig
+++ b/lib/compiler_rt/floatsihf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage });
+ @export(__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatsihf(a: i32) callconv(.C) f16 {
diff --git a/lib/compiler_rt/floatsisf.zig b/lib/compiler_rt/floatsisf.zig
index 87f83315c1..daddfb06e1 100644
--- a/lib/compiler_rt/floatsisf.zig
+++ b/lib/compiler_rt/floatsisf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = common.linkage });
+ @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatsisf, .{ .name = "__floatsisf", .linkage = common.linkage });
+ @export(__floatsisf, .{ .name = "__floatsisf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatsitf.zig b/lib/compiler_rt/floatsitf.zig
index 20473fa1d0..9739b91280 100644
--- a/lib/compiler_rt/floatsitf.zig
+++ b/lib/compiler_rt/floatsitf.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__floatsitf, .{ .name = "__floatsikf", .linkage = common.linkage });
+ @export(__floatsitf, .{ .name = "__floatsikf", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = common.linkage });
+ @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage });
+ @export(__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floatsitf(a: i32) callconv(.C) f128 {
diff --git a/lib/compiler_rt/floatsixf.zig b/lib/compiler_rt/floatsixf.zig
index 76d266e17a..a9d3709911 100644
--- a/lib/compiler_rt/floatsixf.zig
+++ b/lib/compiler_rt/floatsixf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatsixf, .{ .name = "__floatsixf", .linkage = common.linkage });
+ @export(__floatsixf, .{ .name = "__floatsixf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatsixf(a: i32) callconv(.C) f80 {
diff --git a/lib/compiler_rt/floattidf.zig b/lib/compiler_rt/floattidf.zig
index 31456948e9..d70fedbfc3 100644
--- a/lib/compiler_rt/floattidf.zig
+++ b/lib/compiler_rt/floattidf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floattidf_windows_x86_64, .{ .name = "__floattidf", .linkage = common.linkage });
+ @export(__floattidf_windows_x86_64, .{ .name = "__floattidf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage });
+ @export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floattihf.zig b/lib/compiler_rt/floattihf.zig
index 3e33a0bd8a..f90a57d1e0 100644
--- a/lib/compiler_rt/floattihf.zig
+++ b/lib/compiler_rt/floattihf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floattihf_windows_x86_64, .{ .name = "__floattihf", .linkage = common.linkage });
+ @export(__floattihf_windows_x86_64, .{ .name = "__floattihf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage });
+ @export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floattisf.zig b/lib/compiler_rt/floattisf.zig
index 23ff0d16b4..737e1ec409 100644
--- a/lib/compiler_rt/floattisf.zig
+++ b/lib/compiler_rt/floattisf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floattisf_windows_x86_64, .{ .name = "__floattisf", .linkage = common.linkage });
+ @export(__floattisf_windows_x86_64, .{ .name = "__floattisf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage });
+ @export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floattitf.zig b/lib/compiler_rt/floattitf.zig
index c44473cc3d..62c215c986 100644
--- a/lib/compiler_rt/floattitf.zig
+++ b/lib/compiler_rt/floattitf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floattitf_windows_x86_64, .{ .name = "__floattitf", .linkage = common.linkage });
+ @export(__floattitf_windows_x86_64, .{ .name = "__floattitf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage });
+ @export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floattixf.zig b/lib/compiler_rt/floattixf.zig
index 814880b9ab..1af4f83965 100644
--- a/lib/compiler_rt/floattixf.zig
+++ b/lib/compiler_rt/floattixf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floattixf_windows_x86_64, .{ .name = "__floattixf", .linkage = common.linkage });
+ @export(__floattixf_windows_x86_64, .{ .name = "__floattixf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage });
+ @export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatundidf.zig b/lib/compiler_rt/floatundidf.zig
index d49575639e..db4cc6505e 100644
--- a/lib/compiler_rt/floatundidf.zig
+++ b/lib/compiler_rt/floatundidf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = common.linkage });
+ @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatundidf, .{ .name = "__floatundidf", .linkage = common.linkage });
+ @export(__floatundidf, .{ .name = "__floatundidf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatundihf.zig b/lib/compiler_rt/floatundihf.zig
index 6eff8aaec3..e6c6a79d5e 100644
--- a/lib/compiler_rt/floatundihf.zig
+++ b/lib/compiler_rt/floatundihf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatundihf, .{ .name = "__floatundihf", .linkage = common.linkage });
+ @export(__floatundihf, .{ .name = "__floatundihf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatundihf(a: u64) callconv(.C) f16 {
diff --git a/lib/compiler_rt/floatundisf.zig b/lib/compiler_rt/floatundisf.zig
index 963670d85b..eb17c0f657 100644
--- a/lib/compiler_rt/floatundisf.zig
+++ b/lib/compiler_rt/floatundisf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = common.linkage });
+ @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatundisf, .{ .name = "__floatundisf", .linkage = common.linkage });
+ @export(__floatundisf, .{ .name = "__floatundisf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatunditf.zig b/lib/compiler_rt/floatunditf.zig
index 9236f4705f..0bfa36d6e1 100644
--- a/lib/compiler_rt/floatunditf.zig
+++ b/lib/compiler_rt/floatunditf.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__floatunditf, .{ .name = "__floatundikf", .linkage = common.linkage });
+ @export(__floatunditf, .{ .name = "__floatundikf", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = common.linkage });
+ @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__floatunditf, .{ .name = "__floatunditf", .linkage = common.linkage });
+ @export(__floatunditf, .{ .name = "__floatunditf", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floatunditf(a: u64) callconv(.C) f128 {
diff --git a/lib/compiler_rt/floatundixf.zig b/lib/compiler_rt/floatundixf.zig
index 331b74df4f..22f885167f 100644
--- a/lib/compiler_rt/floatundixf.zig
+++ b/lib/compiler_rt/floatundixf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatundixf, .{ .name = "__floatundixf", .linkage = common.linkage });
+ @export(__floatundixf, .{ .name = "__floatundixf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatundixf(a: u64) callconv(.C) f80 {
diff --git a/lib/compiler_rt/floatunsidf.zig b/lib/compiler_rt/floatunsidf.zig
index 1f5a47287a..ef5bce2afa 100644
--- a/lib/compiler_rt/floatunsidf.zig
+++ b/lib/compiler_rt/floatunsidf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = common.linkage });
+ @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = common.linkage });
+ @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatunsihf.zig b/lib/compiler_rt/floatunsihf.zig
index b2f679c18c..0b43d61f4c 100644
--- a/lib/compiler_rt/floatunsihf.zig
+++ b/lib/compiler_rt/floatunsihf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatunsihf, .{ .name = "__floatunsihf", .linkage = common.linkage });
+ @export(__floatunsihf, .{ .name = "__floatunsihf", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floatunsihf(a: u32) callconv(.C) f16 {
diff --git a/lib/compiler_rt/floatunsisf.zig b/lib/compiler_rt/floatunsisf.zig
index 46f336a4d8..f85d1bb013 100644
--- a/lib/compiler_rt/floatunsisf.zig
+++ b/lib/compiler_rt/floatunsisf.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = common.linkage });
+ @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = common.linkage });
+ @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatunsitf.zig b/lib/compiler_rt/floatunsitf.zig
index af454e1e02..ef9593cdf6 100644
--- a/lib/compiler_rt/floatunsitf.zig
+++ b/lib/compiler_rt/floatunsitf.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = common.linkage });
+ @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = common.linkage });
+ @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = common.linkage });
+ @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floatunsitf(a: u32) callconv(.C) f128 {
diff --git a/lib/compiler_rt/floatunsixf.zig b/lib/compiler_rt/floatunsixf.zig
index 40492564fc..cd402e227d 100644
--- a/lib/compiler_rt/floatunsixf.zig
+++ b/lib/compiler_rt/floatunsixf.zig
@@ -4,7 +4,7 @@ const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
- @export(__floatunsixf, .{ .name = "__floatunsixf", .linkage = common.linkage });
+ @export(__floatunsixf, .{ .name = "__floatunsixf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatunsixf(a: u32) callconv(.C) f80 {
diff --git a/lib/compiler_rt/floatuntidf.zig b/lib/compiler_rt/floatuntidf.zig
index a00175d9a9..d3a685a1ce 100644
--- a/lib/compiler_rt/floatuntidf.zig
+++ b/lib/compiler_rt/floatuntidf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floatuntidf_windows_x86_64, .{ .name = "__floatuntidf", .linkage = common.linkage });
+ @export(__floatuntidf_windows_x86_64, .{ .name = "__floatuntidf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage });
+ @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatuntihf.zig b/lib/compiler_rt/floatuntihf.zig
index 3cf7a32d27..9102960e8d 100644
--- a/lib/compiler_rt/floatuntihf.zig
+++ b/lib/compiler_rt/floatuntihf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floatuntihf_windows_x86_64, .{ .name = "__floatuntihf", .linkage = common.linkage });
+ @export(__floatuntihf_windows_x86_64, .{ .name = "__floatuntihf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage });
+ @export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatuntisf.zig b/lib/compiler_rt/floatuntisf.zig
index 997d57293e..7ee013339d 100644
--- a/lib/compiler_rt/floatuntisf.zig
+++ b/lib/compiler_rt/floatuntisf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floatuntisf_windows_x86_64, .{ .name = "__floatuntisf", .linkage = common.linkage });
+ @export(__floatuntisf_windows_x86_64, .{ .name = "__floatuntisf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage });
+ @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatuntitf.zig b/lib/compiler_rt/floatuntitf.zig
index ee386bc96b..413d9253d0 100644
--- a/lib/compiler_rt/floatuntitf.zig
+++ b/lib/compiler_rt/floatuntitf.zig
@@ -6,12 +6,12 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floatuntitf_windows_x86_64, .{ .name = "__floatuntitf", .linkage = common.linkage });
+ @export(__floatuntitf_windows_x86_64, .{ .name = "__floatuntitf", .linkage = common.linkage, .visibility = common.visibility });
} else {
if (common.want_ppc_abi) {
- @export(__floatuntitf, .{ .name = "__floatuntikf", .linkage = common.linkage });
+ @export(__floatuntitf, .{ .name = "__floatuntikf", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = common.linkage });
+ @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floatuntixf.zig b/lib/compiler_rt/floatuntixf.zig
index 724af95075..877203bd22 100644
--- a/lib/compiler_rt/floatuntixf.zig
+++ b/lib/compiler_rt/floatuntixf.zig
@@ -6,9 +6,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__floatuntixf_windows_x86_64, .{ .name = "__floatuntixf", .linkage = common.linkage });
+ @export(__floatuntixf_windows_x86_64, .{ .name = "__floatuntixf", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage });
+ @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/floor.zig b/lib/compiler_rt/floor.zig
index afdc825f9f..ea274c0d82 100644
--- a/lib/compiler_rt/floor.zig
+++ b/lib/compiler_rt/floor.zig
@@ -14,15 +14,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__floorh, .{ .name = "__floorh", .linkage = common.linkage });
- @export(floorf, .{ .name = "floorf", .linkage = common.linkage });
- @export(floor, .{ .name = "floor", .linkage = common.linkage });
- @export(__floorx, .{ .name = "__floorx", .linkage = common.linkage });
+ @export(__floorh, .{ .name = "__floorh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(floorf, .{ .name = "floorf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(floor, .{ .name = "floor", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__floorx, .{ .name = "__floorx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(floorq, .{ .name = "floorf128", .linkage = common.linkage });
+ @export(floorq, .{ .name = "floorf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(floorq, .{ .name = "floorq", .linkage = common.linkage });
- @export(floorl, .{ .name = "floorl", .linkage = common.linkage });
+ @export(floorq, .{ .name = "floorq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(floorl, .{ .name = "floorl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floorh(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/fma.zig b/lib/compiler_rt/fma.zig
index 4bde30f50f..fe2da1c99c 100644
--- a/lib/compiler_rt/fma.zig
+++ b/lib/compiler_rt/fma.zig
@@ -15,15 +15,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__fmah, .{ .name = "__fmah", .linkage = common.linkage });
- @export(fmaf, .{ .name = "fmaf", .linkage = common.linkage });
- @export(fma, .{ .name = "fma", .linkage = common.linkage });
- @export(__fmax, .{ .name = "__fmax", .linkage = common.linkage });
+ @export(__fmah, .{ .name = "__fmah", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmaf, .{ .name = "fmaf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fma, .{ .name = "fma", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__fmax, .{ .name = "__fmax", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(fmaq, .{ .name = "fmaf128", .linkage = common.linkage });
+ @export(fmaq, .{ .name = "fmaf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(fmaq, .{ .name = "fmaq", .linkage = common.linkage });
- @export(fmal, .{ .name = "fmal", .linkage = common.linkage });
+ @export(fmaq, .{ .name = "fmaq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmal, .{ .name = "fmal", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fmah(x: f16, y: f16, z: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/fmax.zig b/lib/compiler_rt/fmax.zig
index df7c4a7f2e..9645b665f3 100644
--- a/lib/compiler_rt/fmax.zig
+++ b/lib/compiler_rt/fmax.zig
@@ -7,15 +7,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__fmaxh, .{ .name = "__fmaxh", .linkage = common.linkage });
- @export(fmaxf, .{ .name = "fmaxf", .linkage = common.linkage });
- @export(fmax, .{ .name = "fmax", .linkage = common.linkage });
- @export(__fmaxx, .{ .name = "__fmaxx", .linkage = common.linkage });
+ @export(__fmaxh, .{ .name = "__fmaxh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmaxf, .{ .name = "fmaxf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmax, .{ .name = "fmax", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__fmaxx, .{ .name = "__fmaxx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(fmaxq, .{ .name = "fmaxf128", .linkage = common.linkage });
+ @export(fmaxq, .{ .name = "fmaxf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(fmaxq, .{ .name = "fmaxq", .linkage = common.linkage });
- @export(fmaxl, .{ .name = "fmaxl", .linkage = common.linkage });
+ @export(fmaxq, .{ .name = "fmaxq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmaxl, .{ .name = "fmaxl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fmaxh(x: f16, y: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/fmin.zig b/lib/compiler_rt/fmin.zig
index 7565f3a572..6cb7f8544e 100644
--- a/lib/compiler_rt/fmin.zig
+++ b/lib/compiler_rt/fmin.zig
@@ -7,15 +7,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__fminh, .{ .name = "__fminh", .linkage = common.linkage });
- @export(fminf, .{ .name = "fminf", .linkage = common.linkage });
- @export(fmin, .{ .name = "fmin", .linkage = common.linkage });
- @export(__fminx, .{ .name = "__fminx", .linkage = common.linkage });
+ @export(__fminh, .{ .name = "__fminh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fminf, .{ .name = "fminf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmin, .{ .name = "fmin", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__fminx, .{ .name = "__fminx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(fminq, .{ .name = "fminf128", .linkage = common.linkage });
+ @export(fminq, .{ .name = "fminf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(fminq, .{ .name = "fminq", .linkage = common.linkage });
- @export(fminl, .{ .name = "fminl", .linkage = common.linkage });
+ @export(fminq, .{ .name = "fminq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fminl, .{ .name = "fminl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fminh(x: f16, y: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/fmod.zig b/lib/compiler_rt/fmod.zig
index e276b99884..b80dffdb82 100644
--- a/lib/compiler_rt/fmod.zig
+++ b/lib/compiler_rt/fmod.zig
@@ -9,15 +9,15 @@ const normalize = common.normalize;
pub const panic = common.panic;
comptime {
- @export(__fmodh, .{ .name = "__fmodh", .linkage = common.linkage });
- @export(fmodf, .{ .name = "fmodf", .linkage = common.linkage });
- @export(fmod, .{ .name = "fmod", .linkage = common.linkage });
- @export(__fmodx, .{ .name = "__fmodx", .linkage = common.linkage });
+ @export(__fmodh, .{ .name = "__fmodh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmodf, .{ .name = "fmodf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmod, .{ .name = "fmod", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__fmodx, .{ .name = "__fmodx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(fmodq, .{ .name = "fmodf128", .linkage = common.linkage });
+ @export(fmodq, .{ .name = "fmodf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(fmodq, .{ .name = "fmodq", .linkage = common.linkage });
- @export(fmodl, .{ .name = "fmodl", .linkage = common.linkage });
+ @export(fmodq, .{ .name = "fmodq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(fmodl, .{ .name = "fmodl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fmodh(x: f16, y: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/gedf2.zig b/lib/compiler_rt/gedf2.zig
index 684ba665b5..c887a9b6e4 100644
--- a/lib/compiler_rt/gedf2.zig
+++ b/lib/compiler_rt/gedf2.zig
@@ -7,11 +7,11 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = common.linkage });
- @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = common.linkage });
+ @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__gedf2, .{ .name = "__gedf2", .linkage = common.linkage });
- @export(__gtdf2, .{ .name = "__gtdf2", .linkage = common.linkage });
+ @export(__gedf2, .{ .name = "__gedf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__gtdf2, .{ .name = "__gtdf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/gehf2.zig b/lib/compiler_rt/gehf2.zig
index 651cbf943f..e3df4d3087 100644
--- a/lib/compiler_rt/gehf2.zig
+++ b/lib/compiler_rt/gehf2.zig
@@ -6,8 +6,8 @@ const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
- @export(__gehf2, .{ .name = "__gehf2", .linkage = common.linkage });
- @export(__gthf2, .{ .name = "__gthf2", .linkage = common.linkage });
+ @export(__gehf2, .{ .name = "__gehf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__gthf2, .{ .name = "__gthf2", .linkage = common.linkage, .visibility = common.visibility });
}
/// "These functions return a value greater than or equal to zero if neither
diff --git a/lib/compiler_rt/gesf2.zig b/lib/compiler_rt/gesf2.zig
index 3d455e52bf..44439976bb 100644
--- a/lib/compiler_rt/gesf2.zig
+++ b/lib/compiler_rt/gesf2.zig
@@ -7,11 +7,11 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = common.linkage });
- @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = common.linkage });
+ @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__gesf2, .{ .name = "__gesf2", .linkage = common.linkage });
- @export(__gtsf2, .{ .name = "__gtsf2", .linkage = common.linkage });
+ @export(__gesf2, .{ .name = "__gesf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__gtsf2, .{ .name = "__gtsf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/getf2.zig b/lib/compiler_rt/getf2.zig
index 831736250f..1a5c06af3d 100644
--- a/lib/compiler_rt/getf2.zig
+++ b/lib/compiler_rt/getf2.zig
@@ -7,14 +7,14 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__getf2, .{ .name = "__gekf2", .linkage = common.linkage });
- @export(__gttf2, .{ .name = "__gtkf2", .linkage = common.linkage });
+ @export(__getf2, .{ .name = "__gekf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__gttf2, .{ .name = "__gtkf2", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
// These exports are handled in cmptf2.zig because gt and ge on sparc
// are based on calling _Qp_cmp.
}
- @export(__getf2, .{ .name = "__getf2", .linkage = common.linkage });
- @export(__gttf2, .{ .name = "__gttf2", .linkage = common.linkage });
+ @export(__getf2, .{ .name = "__getf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__gttf2, .{ .name = "__gttf2", .linkage = common.linkage, .visibility = common.visibility });
}
/// "These functions return a value greater than or equal to zero if neither
diff --git a/lib/compiler_rt/gexf2.zig b/lib/compiler_rt/gexf2.zig
index 6bb88fbb8f..bf0b0edccb 100644
--- a/lib/compiler_rt/gexf2.zig
+++ b/lib/compiler_rt/gexf2.zig
@@ -4,8 +4,8 @@ const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
- @export(__gexf2, .{ .name = "__gexf2", .linkage = common.linkage });
- @export(__gtxf2, .{ .name = "__gtxf2", .linkage = common.linkage });
+ @export(__gexf2, .{ .name = "__gexf2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__gtxf2, .{ .name = "__gtxf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
diff --git a/lib/compiler_rt/int.zig b/lib/compiler_rt/int.zig
index 43bc160cda..b844af1859 100644
--- a/lib/compiler_rt/int.zig
+++ b/lib/compiler_rt/int.zig
@@ -13,24 +13,24 @@ const udivmod = @import("udivmod.zig").udivmod;
pub const panic = common.panic;
comptime {
- @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = common.linkage });
- @export(__mulsi3, .{ .name = "__mulsi3", .linkage = common.linkage });
- @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = common.linkage });
+ @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__mulsi3, .{ .name = "__mulsi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_aeabi) {
- @export(__aeabi_idiv, .{ .name = "__aeabi_idiv", .linkage = common.linkage });
- @export(__aeabi_uidiv, .{ .name = "__aeabi_uidiv", .linkage = common.linkage });
+ @export(__aeabi_idiv, .{ .name = "__aeabi_idiv", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_uidiv, .{ .name = "__aeabi_uidiv", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__divsi3, .{ .name = "__divsi3", .linkage = common.linkage });
- @export(__udivsi3, .{ .name = "__udivsi3", .linkage = common.linkage });
+ @export(__divsi3, .{ .name = "__divsi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__udivsi3, .{ .name = "__udivsi3", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__divdi3, .{ .name = "__divdi3", .linkage = common.linkage });
- @export(__udivdi3, .{ .name = "__udivdi3", .linkage = common.linkage });
- @export(__modsi3, .{ .name = "__modsi3", .linkage = common.linkage });
- @export(__moddi3, .{ .name = "__moddi3", .linkage = common.linkage });
- @export(__umodsi3, .{ .name = "__umodsi3", .linkage = common.linkage });
- @export(__umoddi3, .{ .name = "__umoddi3", .linkage = common.linkage });
- @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = common.linkage });
- @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = common.linkage });
+ @export(__divdi3, .{ .name = "__divdi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__udivdi3, .{ .name = "__udivdi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__modsi3, .{ .name = "__modsi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__moddi3, .{ .name = "__moddi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__umodsi3, .{ .name = "__umodsi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__umoddi3, .{ .name = "__umoddi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __divmoddi4(a: i64, b: i64, rem: *i64) callconv(.C) i64 {
diff --git a/lib/compiler_rt/int_to_float_test.zig b/lib/compiler_rt/int_to_float_test.zig
index 7d81115755..608a925bf2 100644
--- a/lib/compiler_rt/int_to_float_test.zig
+++ b/lib/compiler_rt/int_to_float_test.zig
@@ -1,5 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
const testing = std.testing;
const math = std.math;
@@ -811,8 +810,6 @@ test "conversion to f32" {
}
test "conversion to f80" {
- if (builtin.zig_backend == .stage1 and builtin.cpu.arch != .x86_64)
- return error.SkipZigTest; // https://github.com/ziglang/zig/issues/11408
if (std.debug.runtime_safety) return error.SkipZigTest;
const intToFloat = @import("./int_to_float.zig").intToFloat;
diff --git a/lib/compiler_rt/log.zig b/lib/compiler_rt/log.zig
index 5e7085a41b..71d90f7c3a 100644
--- a/lib/compiler_rt/log.zig
+++ b/lib/compiler_rt/log.zig
@@ -14,15 +14,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__logh, .{ .name = "__logh", .linkage = common.linkage });
- @export(logf, .{ .name = "logf", .linkage = common.linkage });
- @export(log, .{ .name = "log", .linkage = common.linkage });
- @export(__logx, .{ .name = "__logx", .linkage = common.linkage });
+ @export(__logh, .{ .name = "__logh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(logf, .{ .name = "logf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(log, .{ .name = "log", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__logx, .{ .name = "__logx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(logq, .{ .name = "logf128", .linkage = common.linkage });
+ @export(logq, .{ .name = "logf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(logq, .{ .name = "logq", .linkage = common.linkage });
- @export(logl, .{ .name = "logl", .linkage = common.linkage });
+ @export(logq, .{ .name = "logq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(logl, .{ .name = "logl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __logh(a: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/log10.zig b/lib/compiler_rt/log10.zig
index 54638cc645..5c345ff12f 100644
--- a/lib/compiler_rt/log10.zig
+++ b/lib/compiler_rt/log10.zig
@@ -15,15 +15,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__log10h, .{ .name = "__log10h", .linkage = common.linkage });
- @export(log10f, .{ .name = "log10f", .linkage = common.linkage });
- @export(log10, .{ .name = "log10", .linkage = common.linkage });
- @export(__log10x, .{ .name = "__log10x", .linkage = common.linkage });
+ @export(__log10h, .{ .name = "__log10h", .linkage = common.linkage, .visibility = common.visibility });
+ @export(log10f, .{ .name = "log10f", .linkage = common.linkage, .visibility = common.visibility });
+ @export(log10, .{ .name = "log10", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__log10x, .{ .name = "__log10x", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(log10q, .{ .name = "log10f128", .linkage = common.linkage });
+ @export(log10q, .{ .name = "log10f128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(log10q, .{ .name = "log10q", .linkage = common.linkage });
- @export(log10l, .{ .name = "log10l", .linkage = common.linkage });
+ @export(log10q, .{ .name = "log10q", .linkage = common.linkage, .visibility = common.visibility });
+ @export(log10l, .{ .name = "log10l", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __log10h(a: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/log2.zig b/lib/compiler_rt/log2.zig
index 8298c3dbe3..612c978598 100644
--- a/lib/compiler_rt/log2.zig
+++ b/lib/compiler_rt/log2.zig
@@ -15,15 +15,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__log2h, .{ .name = "__log2h", .linkage = common.linkage });
- @export(log2f, .{ .name = "log2f", .linkage = common.linkage });
- @export(log2, .{ .name = "log2", .linkage = common.linkage });
- @export(__log2x, .{ .name = "__log2x", .linkage = common.linkage });
+ @export(__log2h, .{ .name = "__log2h", .linkage = common.linkage, .visibility = common.visibility });
+ @export(log2f, .{ .name = "log2f", .linkage = common.linkage, .visibility = common.visibility });
+ @export(log2, .{ .name = "log2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__log2x, .{ .name = "__log2x", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(log2q, .{ .name = "log2f128", .linkage = common.linkage });
+ @export(log2q, .{ .name = "log2f128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(log2q, .{ .name = "log2q", .linkage = common.linkage });
- @export(log2l, .{ .name = "log2l", .linkage = common.linkage });
+ @export(log2q, .{ .name = "log2q", .linkage = common.linkage, .visibility = common.visibility });
+ @export(log2l, .{ .name = "log2l", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __log2h(a: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/memcmp.zig b/lib/compiler_rt/memcmp.zig
index 6f7e48135b..b337e6f29e 100644
--- a/lib/compiler_rt/memcmp.zig
+++ b/lib/compiler_rt/memcmp.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const common = @import("./common.zig");
comptime {
- @export(memcmp, .{ .name = "memcmp", .linkage = common.linkage });
+ @export(memcmp, .{ .name = "memcmp", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn memcmp(vl: [*]const u8, vr: [*]const u8, n: usize) callconv(.C) c_int {
diff --git a/lib/compiler_rt/memcpy.zig b/lib/compiler_rt/memcpy.zig
index 8f58fe909c..410117b6c3 100644
--- a/lib/compiler_rt/memcpy.zig
+++ b/lib/compiler_rt/memcpy.zig
@@ -4,7 +4,7 @@ const builtin = @import("builtin");
comptime {
if (builtin.object_format != .c) {
- @export(memcpy, .{ .name = "memcpy", .linkage = common.linkage });
+ @export(memcpy, .{ .name = "memcpy", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/memmove.zig b/lib/compiler_rt/memmove.zig
index 73d8679529..61ccb1205d 100644
--- a/lib/compiler_rt/memmove.zig
+++ b/lib/compiler_rt/memmove.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const common = @import("./common.zig");
comptime {
- @export(memmove, .{ .name = "memmove", .linkage = common.linkage });
+ @export(memmove, .{ .name = "memmove", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
diff --git a/lib/compiler_rt/memset.zig b/lib/compiler_rt/memset.zig
index e88e74079a..25c5493607 100644
--- a/lib/compiler_rt/memset.zig
+++ b/lib/compiler_rt/memset.zig
@@ -4,8 +4,8 @@ const builtin = @import("builtin");
comptime {
if (builtin.object_format != .c) {
- @export(memset, .{ .name = "memset", .linkage = common.linkage });
- @export(__memset, .{ .name = "__memset", .linkage = common.linkage });
+ @export(memset, .{ .name = "memset", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__memset, .{ .name = "__memset", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/modti3.zig b/lib/compiler_rt/modti3.zig
index 9992f716ee..ef02a697bc 100644
--- a/lib/compiler_rt/modti3.zig
+++ b/lib/compiler_rt/modti3.zig
@@ -11,9 +11,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage });
+ @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
+ @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/muldc3.zig b/lib/compiler_rt/muldc3.zig
index cef8f660b2..3a497fdedc 100644
--- a/lib/compiler_rt/muldc3.zig
+++ b/lib/compiler_rt/muldc3.zig
@@ -5,7 +5,7 @@ pub const panic = common.panic;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__muldc3, .{ .name = "__muldc3", .linkage = common.linkage });
+ @export(__muldc3, .{ .name = "__muldc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/muldf3.zig b/lib/compiler_rt/muldf3.zig
index ef7ab9fbf7..81c6989381 100644
--- a/lib/compiler_rt/muldf3.zig
+++ b/lib/compiler_rt/muldf3.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = common.linkage });
+ @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__muldf3, .{ .name = "__muldf3", .linkage = common.linkage });
+ @export(__muldf3, .{ .name = "__muldf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/muldi3.zig b/lib/compiler_rt/muldi3.zig
index a51c6c7b76..c79713fed0 100644
--- a/lib/compiler_rt/muldi3.zig
+++ b/lib/compiler_rt/muldi3.zig
@@ -10,9 +10,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_lmul, .{ .name = "__aeabi_lmul", .linkage = common.linkage });
+ @export(__aeabi_lmul, .{ .name = "__aeabi_lmul", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__muldi3, .{ .name = "__muldi3", .linkage = common.linkage });
+ @export(__muldi3, .{ .name = "__muldi3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/mulhc3.zig b/lib/compiler_rt/mulhc3.zig
index 70c68e2a72..cf44f1cc14 100644
--- a/lib/compiler_rt/mulhc3.zig
+++ b/lib/compiler_rt/mulhc3.zig
@@ -5,7 +5,7 @@ pub const panic = common.panic;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__mulhc3, .{ .name = "__mulhc3", .linkage = common.linkage });
+ @export(__mulhc3, .{ .name = "__mulhc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/mulhf3.zig b/lib/compiler_rt/mulhf3.zig
index 45251548be..c38c659092 100644
--- a/lib/compiler_rt/mulhf3.zig
+++ b/lib/compiler_rt/mulhf3.zig
@@ -4,7 +4,7 @@ const mulf3 = @import("./mulf3.zig").mulf3;
pub const panic = common.panic;
comptime {
- @export(__mulhf3, .{ .name = "__mulhf3", .linkage = common.linkage });
+ @export(__mulhf3, .{ .name = "__mulhf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __mulhf3(a: f16, b: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/mulo.zig b/lib/compiler_rt/mulo.zig
index 8347a1fbe8..13e58a7800 100644
--- a/lib/compiler_rt/mulo.zig
+++ b/lib/compiler_rt/mulo.zig
@@ -6,9 +6,9 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__mulosi4, .{ .name = "__mulosi4", .linkage = common.linkage });
- @export(__mulodi4, .{ .name = "__mulodi4", .linkage = common.linkage });
- @export(__muloti4, .{ .name = "__muloti4", .linkage = common.linkage });
+ @export(__mulosi4, .{ .name = "__mulosi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__mulodi4, .{ .name = "__mulodi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__muloti4, .{ .name = "__muloti4", .linkage = common.linkage, .visibility = common.visibility });
}
// mulo - multiplication overflow
@@ -65,15 +65,6 @@ pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
}
pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
- switch (builtin.zig_backend) {
- .stage1, .stage2_llvm => {
- // Workaround for https://github.com/llvm/llvm-project/issues/56403
- // When we call the genericSmall implementation instead, LLVM optimizer
- // optimizes __muloti4 to a call to itself.
- return muloXi4_genericFast(i128, a, b, overflow);
- },
- else => {},
- }
if (2 * @bitSizeOf(i128) <= @bitSizeOf(usize)) {
return muloXi4_genericFast(i128, a, b, overflow);
} else {
diff --git a/lib/compiler_rt/mulsc3.zig b/lib/compiler_rt/mulsc3.zig
index 5fe289453b..add389a598 100644
--- a/lib/compiler_rt/mulsc3.zig
+++ b/lib/compiler_rt/mulsc3.zig
@@ -5,7 +5,7 @@ pub const panic = common.panic;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__mulsc3, .{ .name = "__mulsc3", .linkage = common.linkage });
+ @export(__mulsc3, .{ .name = "__mulsc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/mulsf3.zig b/lib/compiler_rt/mulsf3.zig
index 3294f5b1c7..27d0e09f34 100644
--- a/lib/compiler_rt/mulsf3.zig
+++ b/lib/compiler_rt/mulsf3.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = common.linkage });
+ @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__mulsf3, .{ .name = "__mulsf3", .linkage = common.linkage });
+ @export(__mulsf3, .{ .name = "__mulsf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/multc3.zig b/lib/compiler_rt/multc3.zig
index 94f2205d76..89054e4af8 100644
--- a/lib/compiler_rt/multc3.zig
+++ b/lib/compiler_rt/multc3.zig
@@ -5,7 +5,7 @@ pub const panic = common.panic;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__multc3, .{ .name = "__multc3", .linkage = common.linkage });
+ @export(__multc3, .{ .name = "__multc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/multf3.zig b/lib/compiler_rt/multf3.zig
index 878cd5c4ac..453a869a83 100644
--- a/lib/compiler_rt/multf3.zig
+++ b/lib/compiler_rt/multf3.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__multf3, .{ .name = "__mulkf3", .linkage = common.linkage });
+ @export(__multf3, .{ .name = "__mulkf3", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = common.linkage });
+ @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__multf3, .{ .name = "__multf3", .linkage = common.linkage });
+ @export(__multf3, .{ .name = "__multf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
diff --git a/lib/compiler_rt/multi3.zig b/lib/compiler_rt/multi3.zig
index 9502dc5677..1918e8b976 100644
--- a/lib/compiler_rt/multi3.zig
+++ b/lib/compiler_rt/multi3.zig
@@ -11,9 +11,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage });
+ @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage });
+ @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/mulxc3.zig b/lib/compiler_rt/mulxc3.zig
index 4a90d0e453..8d836ef04d 100644
--- a/lib/compiler_rt/mulxc3.zig
+++ b/lib/compiler_rt/mulxc3.zig
@@ -5,7 +5,7 @@ pub const panic = common.panic;
comptime {
if (@import("builtin").zig_backend != .stage2_c) {
- @export(__mulxc3, .{ .name = "__mulxc3", .linkage = common.linkage });
+ @export(__mulxc3, .{ .name = "__mulxc3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/mulxf3.zig b/lib/compiler_rt/mulxf3.zig
index 353d27c290..04213b3a9b 100644
--- a/lib/compiler_rt/mulxf3.zig
+++ b/lib/compiler_rt/mulxf3.zig
@@ -4,7 +4,7 @@ const mulf3 = @import("./mulf3.zig").mulf3;
pub const panic = common.panic;
comptime {
- @export(__mulxf3, .{ .name = "__mulxf3", .linkage = common.linkage });
+ @export(__mulxf3, .{ .name = "__mulxf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
diff --git a/lib/compiler_rt/negXi2.zig b/lib/compiler_rt/negXi2.zig
index 086f80c6b3..df0ddf9409 100644
--- a/lib/compiler_rt/negXi2.zig
+++ b/lib/compiler_rt/negXi2.zig
@@ -13,9 +13,9 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__negsi2, .{ .name = "__negsi2", .linkage = common.linkage });
- @export(__negdi2, .{ .name = "__negdi2", .linkage = common.linkage });
- @export(__negti2, .{ .name = "__negti2", .linkage = common.linkage });
+ @export(__negsi2, .{ .name = "__negsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__negdi2, .{ .name = "__negdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__negti2, .{ .name = "__negti2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __negsi2(a: i32) callconv(.C) i32 {
diff --git a/lib/compiler_rt/negdf2.zig b/lib/compiler_rt/negdf2.zig
index c730ada7e0..25f27a0a1e 100644
--- a/lib/compiler_rt/negdf2.zig
+++ b/lib/compiler_rt/negdf2.zig
@@ -4,9 +4,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = common.linkage });
+ @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__negdf2, .{ .name = "__negdf2", .linkage = common.linkage });
+ @export(__negdf2, .{ .name = "__negdf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/neghf2.zig b/lib/compiler_rt/neghf2.zig
index fe55a751d8..feee02afb0 100644
--- a/lib/compiler_rt/neghf2.zig
+++ b/lib/compiler_rt/neghf2.zig
@@ -3,7 +3,7 @@ const common = @import("./common.zig");
pub const panic = common.panic;
comptime {
- @export(__neghf2, .{ .name = "__neghf2", .linkage = common.linkage });
+ @export(__neghf2, .{ .name = "__neghf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __neghf2(a: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/negsf2.zig b/lib/compiler_rt/negsf2.zig
index 4cb32097ba..1b9e10e12c 100644
--- a/lib/compiler_rt/negsf2.zig
+++ b/lib/compiler_rt/negsf2.zig
@@ -4,9 +4,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = common.linkage });
+ @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__negsf2, .{ .name = "__negsf2", .linkage = common.linkage });
+ @export(__negsf2, .{ .name = "__negsf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/negtf2.zig b/lib/compiler_rt/negtf2.zig
index c1c1e97802..46d498ab97 100644
--- a/lib/compiler_rt/negtf2.zig
+++ b/lib/compiler_rt/negtf2.zig
@@ -3,7 +3,7 @@ const common = @import("./common.zig");
pub const panic = common.panic;
comptime {
- @export(__negtf2, .{ .name = "__negtf2", .linkage = common.linkage });
+ @export(__negtf2, .{ .name = "__negtf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __negtf2(a: f128) callconv(.C) f128 {
diff --git a/lib/compiler_rt/negv.zig b/lib/compiler_rt/negv.zig
index 361cd80ee7..5a26dc65e6 100644
--- a/lib/compiler_rt/negv.zig
+++ b/lib/compiler_rt/negv.zig
@@ -8,9 +8,9 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__negvsi2, .{ .name = "__negvsi2", .linkage = common.linkage });
- @export(__negvdi2, .{ .name = "__negvdi2", .linkage = common.linkage });
- @export(__negvti2, .{ .name = "__negvti2", .linkage = common.linkage });
+ @export(__negvsi2, .{ .name = "__negvsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__negvdi2, .{ .name = "__negvdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__negvti2, .{ .name = "__negvti2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __negvsi2(a: i32) callconv(.C) i32 {
diff --git a/lib/compiler_rt/negxf2.zig b/lib/compiler_rt/negxf2.zig
index 4e8258453b..13a6ef2cab 100644
--- a/lib/compiler_rt/negxf2.zig
+++ b/lib/compiler_rt/negxf2.zig
@@ -3,7 +3,7 @@ const common = @import("./common.zig");
pub const panic = common.panic;
comptime {
- @export(__negxf2, .{ .name = "__negxf2", .linkage = common.linkage });
+ @export(__negxf2, .{ .name = "__negxf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __negxf2(a: f80) callconv(.C) f80 {
diff --git a/lib/compiler_rt/parity.zig b/lib/compiler_rt/parity.zig
index 2f48a38bff..ee6abf162e 100644
--- a/lib/compiler_rt/parity.zig
+++ b/lib/compiler_rt/parity.zig
@@ -8,9 +8,9 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__paritysi2, .{ .name = "__paritysi2", .linkage = common.linkage });
- @export(__paritydi2, .{ .name = "__paritydi2", .linkage = common.linkage });
- @export(__parityti2, .{ .name = "__parityti2", .linkage = common.linkage });
+ @export(__paritysi2, .{ .name = "__paritysi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__paritydi2, .{ .name = "__paritydi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__parityti2, .{ .name = "__parityti2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __paritysi2(a: i32) callconv(.C) i32 {
diff --git a/lib/compiler_rt/popcount.zig b/lib/compiler_rt/popcount.zig
index 803e93f35a..ddb0b720c7 100644
--- a/lib/compiler_rt/popcount.zig
+++ b/lib/compiler_rt/popcount.zig
@@ -13,9 +13,9 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__popcountsi2, .{ .name = "__popcountsi2", .linkage = common.linkage });
- @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = common.linkage });
- @export(__popcountti2, .{ .name = "__popcountti2", .linkage = common.linkage });
+ @export(__popcountsi2, .{ .name = "__popcountsi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__popcountti2, .{ .name = "__popcountti2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __popcountsi2(a: i32) callconv(.C) i32 {
diff --git a/lib/compiler_rt/round.zig b/lib/compiler_rt/round.zig
index 8f4b390361..121371fa17 100644
--- a/lib/compiler_rt/round.zig
+++ b/lib/compiler_rt/round.zig
@@ -14,15 +14,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__roundh, .{ .name = "__roundh", .linkage = common.linkage });
- @export(roundf, .{ .name = "roundf", .linkage = common.linkage });
- @export(round, .{ .name = "round", .linkage = common.linkage });
- @export(__roundx, .{ .name = "__roundx", .linkage = common.linkage });
+ @export(__roundh, .{ .name = "__roundh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(roundf, .{ .name = "roundf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(round, .{ .name = "round", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__roundx, .{ .name = "__roundx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(roundq, .{ .name = "roundf128", .linkage = common.linkage });
+ @export(roundq, .{ .name = "roundf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(roundq, .{ .name = "roundq", .linkage = common.linkage });
- @export(roundl, .{ .name = "roundl", .linkage = common.linkage });
+ @export(roundq, .{ .name = "roundq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(roundl, .{ .name = "roundl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __roundh(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/shift.zig b/lib/compiler_rt/shift.zig
index e38c3973bc..6d711ee553 100644
--- a/lib/compiler_rt/shift.zig
+++ b/lib/compiler_rt/shift.zig
@@ -7,18 +7,18 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__ashlti3, .{ .name = "__ashlti3", .linkage = common.linkage });
- @export(__ashrti3, .{ .name = "__ashrti3", .linkage = common.linkage });
- @export(__lshrti3, .{ .name = "__lshrti3", .linkage = common.linkage });
+ @export(__ashlti3, .{ .name = "__ashlti3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ashrti3, .{ .name = "__ashrti3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lshrti3, .{ .name = "__lshrti3", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_aeabi) {
- @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = common.linkage });
- @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = common.linkage });
- @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = common.linkage });
+ @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__ashldi3, .{ .name = "__ashldi3", .linkage = common.linkage });
- @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = common.linkage });
- @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = common.linkage });
+ @export(__ashldi3, .{ .name = "__ashldi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/sin.zig b/lib/compiler_rt/sin.zig
index b6d388bf0a..eb3d64b0c8 100644
--- a/lib/compiler_rt/sin.zig
+++ b/lib/compiler_rt/sin.zig
@@ -18,15 +18,15 @@ const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
pub const panic = common.panic;
comptime {
- @export(__sinh, .{ .name = "__sinh", .linkage = common.linkage });
- @export(sinf, .{ .name = "sinf", .linkage = common.linkage });
- @export(sin, .{ .name = "sin", .linkage = common.linkage });
- @export(__sinx, .{ .name = "__sinx", .linkage = common.linkage });
+ @export(__sinh, .{ .name = "__sinh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sinf, .{ .name = "sinf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sin, .{ .name = "sin", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__sinx, .{ .name = "__sinx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(sinq, .{ .name = "sinf128", .linkage = common.linkage });
+ @export(sinq, .{ .name = "sinf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(sinq, .{ .name = "sinq", .linkage = common.linkage });
- @export(sinl, .{ .name = "sinl", .linkage = common.linkage });
+ @export(sinq, .{ .name = "sinq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sinl, .{ .name = "sinl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __sinh(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/sincos.zig b/lib/compiler_rt/sincos.zig
index 30a2b55d95..769c8d8389 100644
--- a/lib/compiler_rt/sincos.zig
+++ b/lib/compiler_rt/sincos.zig
@@ -10,15 +10,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__sincosh, .{ .name = "__sincosh", .linkage = common.linkage });
- @export(sincosf, .{ .name = "sincosf", .linkage = common.linkage });
- @export(sincos, .{ .name = "sincos", .linkage = common.linkage });
- @export(__sincosx, .{ .name = "__sincosx", .linkage = common.linkage });
+ @export(__sincosh, .{ .name = "__sincosh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sincosf, .{ .name = "sincosf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sincos, .{ .name = "sincos", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__sincosx, .{ .name = "__sincosx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(sincosq, .{ .name = "sincosf128", .linkage = common.linkage });
+ @export(sincosq, .{ .name = "sincosf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(sincosq, .{ .name = "sincosq", .linkage = common.linkage });
- @export(sincosl, .{ .name = "sincosl", .linkage = common.linkage });
+ @export(sincosq, .{ .name = "sincosq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sincosl, .{ .name = "sincosl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void {
diff --git a/lib/compiler_rt/sqrt.zig b/lib/compiler_rt/sqrt.zig
index 924a8dc9f9..2ec9c39e0b 100644
--- a/lib/compiler_rt/sqrt.zig
+++ b/lib/compiler_rt/sqrt.zig
@@ -7,15 +7,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__sqrth, .{ .name = "__sqrth", .linkage = common.linkage });
- @export(sqrtf, .{ .name = "sqrtf", .linkage = common.linkage });
- @export(sqrt, .{ .name = "sqrt", .linkage = common.linkage });
- @export(__sqrtx, .{ .name = "__sqrtx", .linkage = common.linkage });
+ @export(__sqrth, .{ .name = "__sqrth", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sqrtf, .{ .name = "sqrtf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sqrt, .{ .name = "sqrt", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__sqrtx, .{ .name = "__sqrtx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(sqrtq, .{ .name = "sqrtf128", .linkage = common.linkage });
+ @export(sqrtq, .{ .name = "sqrtf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(sqrtq, .{ .name = "sqrtq", .linkage = common.linkage });
- @export(sqrtl, .{ .name = "sqrtl", .linkage = common.linkage });
+ @export(sqrtq, .{ .name = "sqrtq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(sqrtl, .{ .name = "sqrtl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __sqrth(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/subdf3.zig b/lib/compiler_rt/subdf3.zig
index 9d62ffe480..a7630b6ea2 100644
--- a/lib/compiler_rt/subdf3.zig
+++ b/lib/compiler_rt/subdf3.zig
@@ -4,9 +4,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = common.linkage });
+ @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__subdf3, .{ .name = "__subdf3", .linkage = common.linkage });
+ @export(__subdf3, .{ .name = "__subdf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/subhf3.zig b/lib/compiler_rt/subhf3.zig
index b14da2d794..f1d648102b 100644
--- a/lib/compiler_rt/subhf3.zig
+++ b/lib/compiler_rt/subhf3.zig
@@ -3,7 +3,7 @@ const common = @import("./common.zig");
pub const panic = common.panic;
comptime {
- @export(__subhf3, .{ .name = "__subhf3", .linkage = common.linkage });
+ @export(__subhf3, .{ .name = "__subhf3", .linkage = common.linkage, .visibility = common.visibility });
}
fn __subhf3(a: f16, b: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/subo.zig b/lib/compiler_rt/subo.zig
index a7dcf258aa..b3542a9e9d 100644
--- a/lib/compiler_rt/subo.zig
+++ b/lib/compiler_rt/subo.zig
@@ -10,9 +10,9 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__subosi4, .{ .name = "__subosi4", .linkage = common.linkage });
- @export(__subodi4, .{ .name = "__subodi4", .linkage = common.linkage });
- @export(__suboti4, .{ .name = "__suboti4", .linkage = common.linkage });
+ @export(__subosi4, .{ .name = "__subosi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__subodi4, .{ .name = "__subodi4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__suboti4, .{ .name = "__suboti4", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
diff --git a/lib/compiler_rt/subsf3.zig b/lib/compiler_rt/subsf3.zig
index 472bccc899..fbc48ead41 100644
--- a/lib/compiler_rt/subsf3.zig
+++ b/lib/compiler_rt/subsf3.zig
@@ -4,9 +4,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = common.linkage });
+ @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__subsf3, .{ .name = "__subsf3", .linkage = common.linkage });
+ @export(__subsf3, .{ .name = "__subsf3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/subtf3.zig b/lib/compiler_rt/subtf3.zig
index 4a0d833f57..0008905c94 100644
--- a/lib/compiler_rt/subtf3.zig
+++ b/lib/compiler_rt/subtf3.zig
@@ -4,11 +4,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__subtf3, .{ .name = "__subkf3", .linkage = common.linkage });
+ @export(__subtf3, .{ .name = "__subkf3", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = common.linkage });
+ @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__subtf3, .{ .name = "__subtf3", .linkage = common.linkage });
+ @export(__subtf3, .{ .name = "__subtf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
diff --git a/lib/compiler_rt/subxf3.zig b/lib/compiler_rt/subxf3.zig
index a143f10ffe..5b3680c2c5 100644
--- a/lib/compiler_rt/subxf3.zig
+++ b/lib/compiler_rt/subxf3.zig
@@ -4,7 +4,7 @@ const common = @import("./common.zig");
pub const panic = common.panic;
comptime {
- @export(__subxf3, .{ .name = "__subxf3", .linkage = common.linkage });
+ @export(__subxf3, .{ .name = "__subxf3", .linkage = common.linkage, .visibility = common.visibility });
}
fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
diff --git a/lib/compiler_rt/tan.zig b/lib/compiler_rt/tan.zig
index 8b8f8287a3..d6ed881afc 100644
--- a/lib/compiler_rt/tan.zig
+++ b/lib/compiler_rt/tan.zig
@@ -20,15 +20,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__tanh, .{ .name = "__tanh", .linkage = common.linkage });
- @export(tanf, .{ .name = "tanf", .linkage = common.linkage });
- @export(tan, .{ .name = "tan", .linkage = common.linkage });
- @export(__tanx, .{ .name = "__tanx", .linkage = common.linkage });
+ @export(__tanh, .{ .name = "__tanh", .linkage = common.linkage, .visibility = common.visibility });
+ @export(tanf, .{ .name = "tanf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(tan, .{ .name = "tan", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__tanx, .{ .name = "__tanx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(tanq, .{ .name = "tanf128", .linkage = common.linkage });
+ @export(tanq, .{ .name = "tanf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(tanq, .{ .name = "tanq", .linkage = common.linkage });
- @export(tanl, .{ .name = "tanl", .linkage = common.linkage });
+ @export(tanq, .{ .name = "tanq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(tanl, .{ .name = "tanl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __tanh(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/trunc.zig b/lib/compiler_rt/trunc.zig
index f600dff61f..8c66ba69e7 100644
--- a/lib/compiler_rt/trunc.zig
+++ b/lib/compiler_rt/trunc.zig
@@ -14,15 +14,15 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
- @export(__trunch, .{ .name = "__trunch", .linkage = common.linkage });
- @export(truncf, .{ .name = "truncf", .linkage = common.linkage });
- @export(trunc, .{ .name = "trunc", .linkage = common.linkage });
- @export(__truncx, .{ .name = "__truncx", .linkage = common.linkage });
+ @export(__trunch, .{ .name = "__trunch", .linkage = common.linkage, .visibility = common.visibility });
+ @export(truncf, .{ .name = "truncf", .linkage = common.linkage, .visibility = common.visibility });
+ @export(trunc, .{ .name = "trunc", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__truncx, .{ .name = "__truncx", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_ppc_abi) {
- @export(truncq, .{ .name = "truncf128", .linkage = common.linkage });
+ @export(truncq, .{ .name = "truncf128", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(truncq, .{ .name = "truncq", .linkage = common.linkage });
- @export(truncl, .{ .name = "truncl", .linkage = common.linkage });
+ @export(truncq, .{ .name = "truncq", .linkage = common.linkage, .visibility = common.visibility });
+ @export(truncl, .{ .name = "truncl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __trunch(x: f16) callconv(.C) f16 {
diff --git a/lib/compiler_rt/truncdfhf2.zig b/lib/compiler_rt/truncdfhf2.zig
index a2d3bf1402..a8ec9bb0a5 100644
--- a/lib/compiler_rt/truncdfhf2.zig
+++ b/lib/compiler_rt/truncdfhf2.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = common.linkage });
+ @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = common.linkage });
+ @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/truncdfsf2.zig b/lib/compiler_rt/truncdfsf2.zig
index 126dfff0fd..85a19988af 100644
--- a/lib/compiler_rt/truncdfsf2.zig
+++ b/lib/compiler_rt/truncdfsf2.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = common.linkage });
+ @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = common.linkage });
+ @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/truncsfhf2.zig b/lib/compiler_rt/truncsfhf2.zig
index 403306979c..80b78c2452 100644
--- a/lib/compiler_rt/truncsfhf2.zig
+++ b/lib/compiler_rt/truncsfhf2.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.gnu_f16_abi) {
- @export(__gnu_f2h_ieee, .{ .name = "__gnu_f2h_ieee", .linkage = common.linkage });
+ @export(__gnu_f2h_ieee, .{ .name = "__gnu_f2h_ieee", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_aeabi) {
- @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = common.linkage });
+ @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = common.linkage });
+ @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __truncsfhf2(a: f32) callconv(.C) common.F16T {
diff --git a/lib/compiler_rt/trunctfdf2.zig b/lib/compiler_rt/trunctfdf2.zig
index 02be252856..6fa089cd41 100644
--- a/lib/compiler_rt/trunctfdf2.zig
+++ b/lib/compiler_rt/trunctfdf2.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = common.linkage });
+ @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = common.linkage });
+ @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = common.linkage });
+ @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __trunctfdf2(a: f128) callconv(.C) f64 {
diff --git a/lib/compiler_rt/trunctfhf2.zig b/lib/compiler_rt/trunctfhf2.zig
index b764a78455..e963753950 100644
--- a/lib/compiler_rt/trunctfhf2.zig
+++ b/lib/compiler_rt/trunctfhf2.zig
@@ -4,7 +4,7 @@ const truncf = @import("./truncf.zig").truncf;
pub const panic = common.panic;
comptime {
- @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = common.linkage });
+ @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __trunctfhf2(a: f128) callconv(.C) common.F16T {
diff --git a/lib/compiler_rt/trunctfsf2.zig b/lib/compiler_rt/trunctfsf2.zig
index 88a3668a93..62c00a81aa 100644
--- a/lib/compiler_rt/trunctfsf2.zig
+++ b/lib/compiler_rt/trunctfsf2.zig
@@ -5,11 +5,11 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = common.linkage });
+ @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
- @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = common.linkage });
+ @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = common.linkage, .visibility = common.visibility });
}
- @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = common.linkage });
+ @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __trunctfsf2(a: f128) callconv(.C) f32 {
diff --git a/lib/compiler_rt/trunctfxf2.zig b/lib/compiler_rt/trunctfxf2.zig
index 731f58f192..018057f213 100644
--- a/lib/compiler_rt/trunctfxf2.zig
+++ b/lib/compiler_rt/trunctfxf2.zig
@@ -5,7 +5,7 @@ const trunc_f80 = @import("./truncf.zig").trunc_f80;
pub const panic = common.panic;
comptime {
- @export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = common.linkage });
+ @export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
@@ -49,14 +49,16 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
const round_bits = a_abs & round_mask;
if (round_bits > halfway) {
// Round to nearest
- const carry = @boolToInt(@addWithOverflow(u64, res.fraction, 1, &res.fraction));
- res.exp += carry;
- res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
+ const ov = @addWithOverflow(res.fraction, 1);
+ res.fraction = ov[0];
+ res.exp += ov[1];
+ res.fraction |= @as(u64, ov[1]) << 63; // Restore integer bit after carry
} else if (round_bits == halfway) {
// Ties to even
- const carry = @boolToInt(@addWithOverflow(u64, res.fraction, res.fraction & 1, &res.fraction));
- res.exp += carry;
- res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
+ const ov = @addWithOverflow(res.fraction, res.fraction & 1);
+ res.fraction = ov[0];
+ res.exp += ov[1];
+ res.fraction |= @as(u64, ov[1]) << 63; // Restore integer bit after carry
}
if (res.exp == 0) res.fraction &= ~@as(u64, integer_bit); // Remove integer bit for de-normals
}
diff --git a/lib/compiler_rt/truncxfdf2.zig b/lib/compiler_rt/truncxfdf2.zig
index 2b8eaaab8c..059db73ef6 100644
--- a/lib/compiler_rt/truncxfdf2.zig
+++ b/lib/compiler_rt/truncxfdf2.zig
@@ -4,7 +4,7 @@ const trunc_f80 = @import("./truncf.zig").trunc_f80;
pub const panic = common.panic;
comptime {
- @export(__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = common.linkage });
+ @export(__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __truncxfdf2(a: f80) callconv(.C) f64 {
diff --git a/lib/compiler_rt/truncxfhf2.zig b/lib/compiler_rt/truncxfhf2.zig
index 75fdd17841..80a0dd69fe 100644
--- a/lib/compiler_rt/truncxfhf2.zig
+++ b/lib/compiler_rt/truncxfhf2.zig
@@ -4,7 +4,7 @@ const trunc_f80 = @import("./truncf.zig").trunc_f80;
pub const panic = common.panic;
comptime {
- @export(__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = common.linkage });
+ @export(__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __truncxfhf2(a: f80) callconv(.C) common.F16T {
diff --git a/lib/compiler_rt/truncxfsf2.zig b/lib/compiler_rt/truncxfsf2.zig
index 57c0cb7bdf..8185c5089a 100644
--- a/lib/compiler_rt/truncxfsf2.zig
+++ b/lib/compiler_rt/truncxfsf2.zig
@@ -4,7 +4,7 @@ const trunc_f80 = @import("./truncf.zig").trunc_f80;
pub const panic = common.panic;
comptime {
- @export(__truncxfsf2, .{ .name = "__truncxfsf2", .linkage = common.linkage });
+ @export(__truncxfsf2, .{ .name = "__truncxfsf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __truncxfsf2(a: f80) callconv(.C) f32 {
diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig
new file mode 100644
index 0000000000..de2427b79f
--- /dev/null
+++ b/lib/compiler_rt/udivmodei4.zig
@@ -0,0 +1,145 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const common = @import("common.zig");
+const shr = std.math.shr;
+const shl = std.math.shl;
+
+const max_limbs = std.math.divCeil(usize, 65535, 32) catch unreachable; // max supported type is u65535
+
+comptime {
+ @export(__udivei4, .{ .name = "__udivei4", .linkage = common.linkage, .visibility = common.visibility });
+ @export(__umodei4, .{ .name = "__umodei4", .linkage = common.linkage, .visibility = common.visibility });
+}
+
+const endian = builtin.cpu.arch.endian();
+
+/// Get the value of a limb.
+inline fn limb(x: []const u32, i: usize) u32 {
+ return if (endian == .Little) x[i] else x[x.len - 1 - i];
+}
+
+/// Change the value of a limb.
+inline fn limb_set(x: []u32, i: usize, v: u32) void {
+ if (endian == .Little) {
+ x[i] = v;
+ } else {
+ x[x.len - 1 - i] = v;
+ }
+}
+
+// Uses Knuth's Algorithm D, 4.3.1, p. 272.
+fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
+ if (q) |q_| std.mem.set(u32, q_[0..], 0);
+ if (r) |r_| std.mem.set(u32, r_[0..], 0);
+
+ if (u.len == 0 or v.len == 0) return error.DivisionByZero;
+
+ var m = u.len - 1;
+ var n = v.len - 1;
+ while (limb(u, m) == 0) : (m -= 1) {
+ if (m == 0) return;
+ }
+ while (limb(v, n) == 0) : (n -= 1) {
+ if (n == 0) return error.DivisionByZero;
+ }
+
+ if (n > m) {
+ if (r) |r_| std.mem.copy(u32, r_[0..], u[0..]);
+ return;
+ }
+
+ const s = @clz(limb(v, n));
+
+ var vn: [max_limbs]u32 = undefined;
+ var i = n;
+ while (i > 0) : (i -= 1) {
+ limb_set(&vn, i, shl(u32, limb(v, i), s) | shr(u32, limb(v, i - 1), 32 - s));
+ }
+ limb_set(&vn, 0, shl(u32, limb(v, 0), s));
+
+ var un: [max_limbs + 1]u32 = undefined;
+ limb_set(&un, m + 1, shr(u32, limb(u, m), 32 - s));
+ i = m;
+ while (i > 0) : (i -= 1) {
+ limb_set(&un, i, shl(u32, limb(u, i), s) | shr(u32, limb(u, i - 1), 32 - s));
+ }
+ limb_set(&un, 0, shl(u32, limb(u, 0), s));
+
+ var j = m - n;
+ while (true) : (j -= 1) {
+ const uu = (@as(u64, limb(&un, j + n + 1)) << 32) + limb(&un, j + n);
+ var qhat = uu / limb(&vn, n);
+ var rhat = uu % limb(&vn, n);
+
+ while (true) {
+ if (qhat >= (1 << 32) or (n > 0 and qhat * limb(&vn, n - 1) > (rhat << 32) + limb(&un, j + n - 1))) {
+ qhat -= 1;
+ rhat += limb(&vn, n);
+ if (rhat < (1 << 32)) continue;
+ }
+ break;
+ }
+ var carry: i64 = 0;
+ i = 0;
+ while (i <= n) : (i += 1) {
+ const p = qhat * limb(&vn, i);
+ const t = @as(i64, limb(&un, i + j)) - carry - @truncate(u32, p);
+ limb_set(&un, i + j, @truncate(u32, @bitCast(u64, t)));
+ carry = @intCast(i64, p >> 32) - @intCast(i64, t >> 32);
+ }
+ const t = limb(&un, j + n + 1) - carry;
+ limb_set(&un, j + n + 1, @truncate(u32, @bitCast(u64, t)));
+ if (q) |q_| limb_set(q_, j, @truncate(u32, qhat));
+ if (t < 0) {
+ if (q) |q_| limb_set(q_, j, limb(q_, j) - 1);
+ var carry2: u64 = 0;
+ i = 0;
+ while (i <= n) : (i += 1) {
+ const t2 = @as(u64, limb(&un, i + j)) + @as(u64, limb(&vn, i)) + carry2;
+ limb_set(&un, i + j, @truncate(u32, t2));
+ carry2 = t2 >> 32;
+ }
+ limb_set(&un, j + n + 1, @truncate(u32, limb(&un, j + n + 1) + carry2));
+ }
+ if (j == 0) break;
+ }
+ if (r) |r_| {
+ i = 0;
+ while (i <= n) : (i += 1) {
+ limb_set(r_, i, shr(u32, limb(&un, i), s) | shl(u32, limb(&un, i + 1), 32 - s));
+ }
+ limb_set(r_, n, shr(u32, limb(&un, n), s));
+ }
+}
+
+pub fn __udivei4(r_q: [*c]u32, u_p: [*c]const u32, v_p: [*c]const u32, bits: usize) callconv(.C) void {
+ @setRuntimeSafety(builtin.is_test);
+ const u = u_p[0 .. bits / 32];
+ const v = v_p[0 .. bits / 32];
+ var q = r_q[0 .. bits / 32];
+ @call(.always_inline, divmod, .{ q, null, u, v }) catch unreachable;
+}
+
+pub fn __umodei4(r_p: [*c]u32, u_p: [*c]const u32, v_p: [*c]const u32, bits: usize) callconv(.C) void {
+ @setRuntimeSafety(builtin.is_test);
+ const u = u_p[0 .. bits / 32];
+ const v = v_p[0 .. bits / 32];
+ var r = r_p[0 .. bits / 32];
+ @call(.always_inline, divmod, .{ null, r, u, v }) catch unreachable;
+}
+
+test "__udivei4/__umodei4" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+ const RndGen = std.rand.DefaultPrng;
+ var rnd = RndGen.init(42);
+ var i: usize = 10000;
+ while (i > 0) : (i -= 1) {
+ const u = rnd.random().int(u1000);
+ const v = 1 + rnd.random().int(u1200);
+ const q = u / v;
+ const r = u % v;
+ const z = q * v + r;
+ try std.testing.expect(z == u);
+ }
+}
diff --git a/lib/compiler_rt/udivmodti4.zig b/lib/compiler_rt/udivmodti4.zig
index 5ccaa78707..29523fc6e8 100644
--- a/lib/compiler_rt/udivmodti4.zig
+++ b/lib/compiler_rt/udivmodti4.zig
@@ -7,9 +7,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = common.linkage });
+ @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage });
+ @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/udivti3.zig b/lib/compiler_rt/udivti3.zig
index 094627ad92..748e9b6599 100644
--- a/lib/compiler_rt/udivti3.zig
+++ b/lib/compiler_rt/udivti3.zig
@@ -7,9 +7,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = common.linkage });
+ @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage });
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/umodti3.zig b/lib/compiler_rt/umodti3.zig
index a9aba96b7e..097f9a3855 100644
--- a/lib/compiler_rt/umodti3.zig
+++ b/lib/compiler_rt/umodti3.zig
@@ -7,9 +7,9 @@ pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
- @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = common.linkage });
+ @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage });
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/unorddf2.zig b/lib/compiler_rt/unorddf2.zig
index 66910a18bf..a185b43155 100644
--- a/lib/compiler_rt/unorddf2.zig
+++ b/lib/compiler_rt/unorddf2.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = common.linkage });
+ @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__unorddf2, .{ .name = "__unorddf2", .linkage = common.linkage });
+ @export(__unorddf2, .{ .name = "__unorddf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/unordhf2.zig b/lib/compiler_rt/unordhf2.zig
index 0c2aea629a..65d6ed7617 100644
--- a/lib/compiler_rt/unordhf2.zig
+++ b/lib/compiler_rt/unordhf2.zig
@@ -4,7 +4,7 @@ const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
- @export(__unordhf2, .{ .name = "__unordhf2", .linkage = common.linkage });
+ @export(__unordhf2, .{ .name = "__unordhf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __unordhf2(a: f16, b: f16) callconv(.C) i32 {
diff --git a/lib/compiler_rt/unordsf2.zig b/lib/compiler_rt/unordsf2.zig
index 78b388a75e..e28ad0091c 100644
--- a/lib/compiler_rt/unordsf2.zig
+++ b/lib/compiler_rt/unordsf2.zig
@@ -5,9 +5,9 @@ pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
- @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = common.linkage });
+ @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = common.linkage, .visibility = common.visibility });
} else {
- @export(__unordsf2, .{ .name = "__unordsf2", .linkage = common.linkage });
+ @export(__unordsf2, .{ .name = "__unordsf2", .linkage = common.linkage, .visibility = common.visibility });
}
}
diff --git a/lib/compiler_rt/unordtf2.zig b/lib/compiler_rt/unordtf2.zig
index e00d9cc6d1..2f98982683 100644
--- a/lib/compiler_rt/unordtf2.zig
+++ b/lib/compiler_rt/unordtf2.zig
@@ -5,12 +5,12 @@ pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
- @export(__unordtf2, .{ .name = "__unordkf2", .linkage = common.linkage });
+ @export(__unordtf2, .{ .name = "__unordkf2", .linkage = common.linkage, .visibility = common.visibility });
} else if (common.want_sparc_abi) {
// These exports are handled in cmptf2.zig because unordered comparisons
// are based on calling _Qp_cmp.
}
- @export(__unordtf2, .{ .name = "__unordtf2", .linkage = common.linkage });
+ @export(__unordtf2, .{ .name = "__unordtf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
diff --git a/lib/compiler_rt/unordxf2.zig b/lib/compiler_rt/unordxf2.zig
index e456096370..d0932f253d 100644
--- a/lib/compiler_rt/unordxf2.zig
+++ b/lib/compiler_rt/unordxf2.zig
@@ -4,7 +4,7 @@ const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
- @export(__unordxf2, .{ .name = "__unordxf2", .linkage = common.linkage });
+ @export(__unordxf2, .{ .name = "__unordxf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __unordxf2(a: f80, b: f80) callconv(.C) i32 {
diff --git a/lib/docs/index.html b/lib/docs/index.html
index 00f570d100..fb8544d7f0 100644
--- a/lib/docs/index.html
+++ b/lib/docs/index.html
@@ -633,7 +633,7 @@
<div class="wrap">
<section class="docs">
<div style="position: relative">
- <span id="searchPlaceholder"><kbd>S</kbd> to search, <kbd>?</kbd> for more options</span>
+ <span id="searchPlaceholder"><kbd>s</kbd> to search, <kbd>?</kbd> for more options</span>
<input type="search" class="search" id="search" autocomplete="off" spellcheck="false" disabled>
</div>
<p id="status">Loading...</p>
diff --git a/lib/libc/mingw/stdio/_vscprintf.c b/lib/libc/mingw/stdio/_vscprintf.c
new file mode 100644
index 0000000000..b859b9a2a0
--- /dev/null
+++ b/lib/libc/mingw/stdio/_vscprintf.c
@@ -0,0 +1,86 @@
+/**
+ * This file has no copyright assigned and is placed in the Public Domain.
+ * This file is part of the mingw-w64 runtime package.
+ * No warranty is given; refer to the file DISCLAIMER.PD within this package.
+ */
+#include <windows.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+/* emulation of _vscprintf() via _vsnprintf() */
+static int __cdecl emu_vscprintf(const char * __restrict__ format, va_list arglist)
+{
+ char *buffer, *new_buffer;
+ size_t size;
+ int ret;
+
+ /* if format is a null pointer, _vscprintf() returns -1 and sets errno to EINVAL */
+ if (!format) {
+ _set_errno(EINVAL);
+ return -1;
+ }
+
+ /* size for _vsnprintf() must be non-zero and buffer must have place for terminating null character */
+ size = strlen(format) * 2 + 1;
+ buffer = malloc(size);
+
+ if (!buffer) {
+ _set_errno(ENOMEM);
+ return -1;
+ }
+
+ /* if the number of characters to write is greater than size, _vsnprintf() returns -1 */
+ while (size < SIZE_MAX/2 && (ret = _vsnprintf(buffer, size, format, arglist)) < 0) {
+ /* in this case try with larger buffer */
+ size *= 2;
+ new_buffer = realloc(buffer, size);
+ if (!new_buffer)
+ break;
+ buffer = new_buffer;
+ }
+
+ free(buffer);
+
+ if (ret < 0) {
+ _set_errno(ENOMEM);
+ return -1;
+ }
+
+ return ret;
+}
+
+#ifndef __LIBMSVCRT_OS__
+
+int (__cdecl *__MINGW_IMP_SYMBOL(_vscprintf))(const char * __restrict__, va_list) = emu_vscprintf;
+
+#else
+
+#include <msvcrt.h>
+
+static int __cdecl init_vscprintf(const char * __restrict__ format, va_list arglist);
+
+int (__cdecl *__MINGW_IMP_SYMBOL(_vscprintf))(const char * __restrict__, va_list) = init_vscprintf;
+
+static int __cdecl init_vscprintf(const char * __restrict__ format, va_list arglist)
+{
+ HMODULE msvcrt = __mingw_get_msvcrt_handle();
+ int (__cdecl *func)(const char * __restrict__, va_list) = NULL;
+
+ if (msvcrt)
+ func = (int (__cdecl *)(const char * __restrict__, va_list))GetProcAddress(msvcrt, "_vscprintf");
+
+ if (!func)
+ func = emu_vscprintf;
+
+ return (__MINGW_IMP_SYMBOL(_vscprintf) = func)(format, arglist);
+}
+
+#endif
+
+int __cdecl _vscprintf(const char * __restrict__ format, va_list arglist)
+{
+ return __MINGW_IMP_SYMBOL(_vscprintf)(format, arglist);
+}
diff --git a/lib/libcxx/include/__config b/lib/libcxx/include/__config
index d9a47343da..0aab23b9b4 100644
--- a/lib/libcxx/include/__config
+++ b/lib/libcxx/include/__config
@@ -148,7 +148,7 @@
# endif
// Feature macros for disabling pre ABI v1 features. All of these options
// are deprecated.
-# if defined(__FreeBSD__)
+# if defined(__FreeBSD__) || defined(__DragonFly__)
# define _LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR
# endif
# endif
@@ -726,11 +726,11 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
# endif // _LIBCPP_CXX03_LANG
# if defined(__APPLE__) || defined(__FreeBSD__) || defined(_LIBCPP_MSVCRT_LIKE) || defined(__sun__) || \
- defined(__NetBSD__)
+ defined(__NetBSD__) || defined(__DragonFly__)
# define _LIBCPP_LOCALE__L_EXTENSIONS 1
# endif
-# ifdef __FreeBSD__
+# if defined(__FreeBSD__) || defined(__DragonFly__)
# define _DECLARE_C99_LDBL_MATH 1
# endif
@@ -750,11 +750,11 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
# define _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
# endif
-# if defined(__APPLE__) || defined(__FreeBSD__)
+# if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__)
# define _LIBCPP_HAS_DEFAULTRUNELOCALE
# endif
-# if defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun__)
+# if defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun__) || defined(__DragonFly__)
# define _LIBCPP_WCTYPE_IS_MASK
# endif
@@ -901,6 +901,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
# if defined(__FreeBSD__) || \
defined(__wasi__) || \
+ defined(__DragonFly__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__) || \
defined(__NuttX__) || \
diff --git a/lib/libcxx/include/__locale b/lib/libcxx/include/__locale
index 40f9a3ff57..62a439b979 100644
--- a/lib/libcxx/include/__locale
+++ b/lib/libcxx/include/__locale
@@ -33,7 +33,7 @@
# include <__support/newlib/xlocale.h>
#elif defined(__OpenBSD__)
# include <__support/openbsd/xlocale.h>
-#elif (defined(__APPLE__) || defined(__FreeBSD__))
+#elif (defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__))
# include <xlocale.h>
#elif defined(__Fuchsia__)
# include <__support/fuchsia/xlocale.h>
@@ -453,10 +453,10 @@ public:
static const mask __regex_word = 0x4000; // 0x8000 and 0x0100 and 0x00ff are used
# define _LIBCPP_CTYPE_MASK_IS_COMPOSITE_PRINT
# define _LIBCPP_CTYPE_MASK_IS_COMPOSITE_ALPHA
-#elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__EMSCRIPTEN__) || defined(__NetBSD__)
+#elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__EMSCRIPTEN__) || defined(__NetBSD__) || defined(__DragonFly__)
# ifdef __APPLE__
typedef __uint32_t mask;
-# elif defined(__FreeBSD__)
+# elif defined(__FreeBSD__) || defined(__DragonFly__)
typedef unsigned long mask;
# elif defined(__EMSCRIPTEN__) || defined(__NetBSD__)
typedef unsigned short mask;
diff --git a/lib/libcxx/include/locale b/lib/libcxx/include/locale
index b01c66d043..de37d35a69 100644
--- a/lib/libcxx/include/locale
+++ b/lib/libcxx/include/locale
@@ -239,7 +239,7 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
-#if defined(__APPLE__) || defined(__FreeBSD__)
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__)
# define _LIBCPP_GET_C_LOCALE 0
#elif defined(__NetBSD__)
# define _LIBCPP_GET_C_LOCALE LC_C_LOCALE
diff --git a/lib/libcxx/src/locale.cpp b/lib/libcxx/src/locale.cpp
index 6b454274e1..dc4ee7cbd9 100644
--- a/lib/libcxx/src/locale.cpp
+++ b/lib/libcxx/src/locale.cpp
@@ -1190,7 +1190,7 @@ ctype<char>::classic_table() noexcept
const ctype<char>::mask*
ctype<char>::classic_table() noexcept
{
-#if defined(__APPLE__) || defined(__FreeBSD__)
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__)
return _DefaultRuneLocale.__runetype;
#elif defined(__NetBSD__)
return _C_ctype_tab_ + 1;
diff --git a/lib/std/Url.zig b/lib/std/Url.zig
new file mode 100644
index 0000000000..8887f5de92
--- /dev/null
+++ b/lib/std/Url.zig
@@ -0,0 +1,98 @@
+scheme: []const u8,
+host: []const u8,
+path: []const u8,
+port: ?u16,
+
+/// TODO: redo this implementation according to RFC 1738. This code is only a
+/// placeholder for now.
+pub fn parse(s: []const u8) !Url {
+ var scheme_end: usize = 0;
+ var host_start: usize = 0;
+ var host_end: usize = 0;
+ var path_start: usize = 0;
+ var port_start: usize = 0;
+ var port_end: usize = 0;
+ var state: enum {
+ scheme,
+ scheme_slash1,
+ scheme_slash2,
+ host,
+ port,
+ path,
+ } = .scheme;
+
+ for (s) |b, i| switch (state) {
+ .scheme => switch (b) {
+ ':' => {
+ state = .scheme_slash1;
+ scheme_end = i;
+ },
+ else => {},
+ },
+ .scheme_slash1 => switch (b) {
+ '/' => {
+ state = .scheme_slash2;
+ },
+ else => return error.InvalidUrl,
+ },
+ .scheme_slash2 => switch (b) {
+ '/' => {
+ state = .host;
+ host_start = i + 1;
+ },
+ else => return error.InvalidUrl,
+ },
+ .host => switch (b) {
+ ':' => {
+ state = .port;
+ host_end = i;
+ port_start = i + 1;
+ },
+ '/' => {
+ state = .path;
+ host_end = i;
+ path_start = i;
+ },
+ else => {},
+ },
+ .port => switch (b) {
+ '/' => {
+ port_end = i;
+ state = .path;
+ path_start = i;
+ },
+ else => {},
+ },
+ .path => {},
+ };
+
+ const port_slice = s[port_start..port_end];
+ const port = if (port_slice.len == 0) null else try std.fmt.parseInt(u16, port_slice, 10);
+
+ return .{
+ .scheme = s[0..scheme_end],
+ .host = s[host_start..host_end],
+ .path = s[path_start..],
+ .port = port,
+ };
+}
+
+const Url = @This();
+const std = @import("std.zig");
+const testing = std.testing;
+
+test "basic" {
+ const parsed = try parse("https://ziglang.org/download");
+ try testing.expectEqualStrings("https", parsed.scheme);
+ try testing.expectEqualStrings("ziglang.org", parsed.host);
+ try testing.expectEqualStrings("/download", parsed.path);
+ try testing.expectEqual(@as(?u16, null), parsed.port);
+}
+
+test "with port" {
+ const parsed = try parse("http://example:1337/");
+ try testing.expectEqualStrings("http", parsed.scheme);
+ try testing.expectEqualStrings("example", parsed.host);
+ try testing.expectEqualStrings("/", parsed.path);
+ try testing.expectEqual(@as(?u16, 1337), parsed.port);
+}
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index 1d6d86f8f5..cf04a54116 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -2270,13 +2270,13 @@ test "reIndex" {
test "auto store_hash" {
const HasCheapEql = AutoArrayHashMap(i32, i32);
const HasExpensiveEql = AutoArrayHashMap([32]i32, i32);
- try testing.expect(meta.fieldInfo(HasCheapEql.Data, .hash).field_type == void);
- try testing.expect(meta.fieldInfo(HasExpensiveEql.Data, .hash).field_type != void);
+ try testing.expect(meta.fieldInfo(HasCheapEql.Data, .hash).type == void);
+ try testing.expect(meta.fieldInfo(HasExpensiveEql.Data, .hash).type != void);
const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32);
const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32);
- try testing.expect(meta.fieldInfo(HasCheapEqlUn.Data, .hash).field_type == void);
- try testing.expect(meta.fieldInfo(HasExpensiveEqlUn.Data, .hash).field_type != void);
+ try testing.expect(meta.fieldInfo(HasCheapEqlUn.Data, .hash).type == void);
+ try testing.expect(meta.fieldInfo(HasExpensiveEqlUn.Data, .hash).type != void);
}
test "sort" {
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 044670e867..852c81f140 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -468,6 +468,20 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
pub fn unusedCapacitySlice(self: Self) Slice {
return self.allocatedSlice()[self.items.len..];
}
+
+ /// Return the last element from the list.
+ /// Asserts the list has at least one item.
+ pub fn getLast(self: *Self) T {
+ const val = self.items[self.items.len - 1];
+ return val;
+ }
+
+ /// Return the last element from the list, or
+ /// return `null` if list is empty.
+ pub fn getLastOrNull(self: *Self) ?T {
+ if (self.items.len == 0) return null;
+ return self.getLast();
+ }
};
}
@@ -913,6 +927,20 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
pub fn unusedCapacitySlice(self: Self) Slice {
return self.allocatedSlice()[self.items.len..];
}
+
+ /// Return the last element from the list.
+ /// Asserts the list has at least one item.
+ pub fn getLast(self: *Self) T {
+ const val = self.items[self.items.len - 1];
+ return val;
+ }
+
+ /// Return the last element from the list, or
+ /// return `null` if list is empty.
+ pub fn getLastOrNull(self: *Self) ?T {
+ if (self.items.len == 0) return null;
+ return self.getLast();
+ }
};
}
@@ -1604,3 +1632,20 @@ test "std.ArrayList(u0)" {
}
try testing.expectEqual(count, 3);
}
+
+test "std.ArrayList(?u32).popOrNull()" {
+ const a = testing.allocator;
+
+ var list = ArrayList(?u32).init(a);
+ defer list.deinit();
+
+ try list.append(null);
+ try list.append(1);
+ try list.append(2);
+ try testing.expectEqual(list.items.len, 3);
+
+ try testing.expect(list.popOrNull().? == @as(u32, 2));
+ try testing.expect(list.popOrNull().? == @as(u32, 1));
+ try testing.expect(list.popOrNull().? == null);
+ try testing.expect(list.popOrNull() == null);
+}
diff --git a/lib/std/build/CheckObjectStep.zig b/lib/std/build/CheckObjectStep.zig
index 7bebea54a0..4ef350b418 100644
--- a/lib/std/build/CheckObjectStep.zig
+++ b/lib/std/build/CheckObjectStep.zig
@@ -126,22 +126,30 @@ const Action = struct {
/// its reduced, computed value compares using `op` with the expected value, either
/// a literal or another extracted variable.
fn computeCmp(act: Action, gpa: Allocator, global_vars: anytype) !bool {
- var op_stack = std.ArrayList(enum { add }).init(gpa);
+ var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa);
var values = std.ArrayList(u64).init(gpa);
var it = mem.tokenize(u8, act.phrase, " ");
while (it.next()) |next| {
if (mem.eql(u8, next, "+")) {
try op_stack.append(.add);
+ } else if (mem.eql(u8, next, "-")) {
+ try op_stack.append(.sub);
+ } else if (mem.eql(u8, next, "%")) {
+ try op_stack.append(.mod);
+ } else if (mem.eql(u8, next, "*")) {
+ try op_stack.append(.mul);
} else {
- const val = global_vars.get(next) orelse {
- std.debug.print(
- \\
- \\========= Variable was not extracted: ===========
- \\{s}
- \\
- , .{next});
- return error.UnknownVariable;
+ const val = std.fmt.parseInt(u64, next, 0) catch blk: {
+ break :blk global_vars.get(next) orelse {
+ std.debug.print(
+ \\
+ \\========= Variable was not extracted: ===========
+ \\{s}
+ \\
+ , .{next});
+ return error.UnknownVariable;
+ };
};
try values.append(val);
}
@@ -155,7 +163,17 @@ const Action = struct {
.add => {
reduced += other;
},
+ .sub => {
+ reduced -= other;
+ },
+ .mod => {
+ reduced %= other;
+ },
+ .mul => {
+ reduced *= other;
+ },
}
+ op_i += 1;
}
const exp_value = switch (act.expected.?.value) {
@@ -571,6 +589,92 @@ const MachODumper = struct {
});
},
+ .UUID => {
+ const uuid = lc.cast(macho.uuid_command).?;
+ try writer.writeByte('\n');
+ try writer.print("uuid {x}", .{std.fmt.fmtSliceHexLower(&uuid.uuid)});
+ },
+
+ .DATA_IN_CODE,
+ .FUNCTION_STARTS,
+ .CODE_SIGNATURE,
+ => {
+ const llc = lc.cast(macho.linkedit_data_command).?;
+ try writer.writeByte('\n');
+ try writer.print(
+ \\dataoff {x}
+ \\datasize {x}
+ , .{ llc.dataoff, llc.datasize });
+ },
+
+ .DYLD_INFO_ONLY => {
+ const dlc = lc.cast(macho.dyld_info_command).?;
+ try writer.writeByte('\n');
+ try writer.print(
+ \\rebaseoff {x}
+ \\rebasesize {x}
+ \\bindoff {x}
+ \\bindsize {x}
+ \\weakbindoff {x}
+ \\weakbindsize {x}
+ \\lazybindoff {x}
+ \\lazybindsize {x}
+ \\exportoff {x}
+ \\exportsize {x}
+ , .{
+ dlc.rebase_off,
+ dlc.rebase_size,
+ dlc.bind_off,
+ dlc.bind_size,
+ dlc.weak_bind_off,
+ dlc.weak_bind_size,
+ dlc.lazy_bind_off,
+ dlc.lazy_bind_size,
+ dlc.export_off,
+ dlc.export_size,
+ });
+ },
+
+ .SYMTAB => {
+ const slc = lc.cast(macho.symtab_command).?;
+ try writer.writeByte('\n');
+ try writer.print(
+ \\symoff {x}
+ \\nsyms {x}
+ \\stroff {x}
+ \\strsize {x}
+ , .{
+ slc.symoff,
+ slc.nsyms,
+ slc.stroff,
+ slc.strsize,
+ });
+ },
+
+ .DYSYMTAB => {
+ const dlc = lc.cast(macho.dysymtab_command).?;
+ try writer.writeByte('\n');
+ try writer.print(
+ \\ilocalsym {x}
+ \\nlocalsym {x}
+ \\iextdefsym {x}
+ \\nextdefsym {x}
+ \\iundefsym {x}
+ \\nundefsym {x}
+ \\indirectsymoff {x}
+ \\nindirectsyms {x}
+ , .{
+ dlc.ilocalsym,
+ dlc.nlocalsym,
+ dlc.iextdefsym,
+ dlc.nextdefsym,
+ dlc.iundefsym,
+ dlc.nundefsym,
+ dlc.indirectsymoff,
+ dlc.nindirectsyms,
+ });
+ },
+
else => {},
}
}
diff --git a/lib/std/build/LibExeObjStep.zig b/lib/std/build/LibExeObjStep.zig
index f992fa0c1e..0b30d138e8 100644
--- a/lib/std/build/LibExeObjStep.zig
+++ b/lib/std/build/LibExeObjStep.zig
@@ -74,6 +74,9 @@ disable_sanitize_c: bool,
sanitize_thread: bool,
rdynamic: bool,
import_memory: bool = false,
+/// For WebAssembly targets, this will allow for undefined symbols to
+/// be imported from the host environment.
+import_symbols: bool = false,
import_table: bool = false,
export_table: bool = false,
initial_memory: ?u64 = null,
@@ -1458,6 +1461,9 @@ fn make(step: *Step) !void {
if (self.import_memory) {
try zig_args.append("--import-memory");
}
+ if (self.import_symbols) {
+ try zig_args.append("--import-symbols");
+ }
if (self.import_table) {
try zig_args.append("--import-table");
}
@@ -1609,8 +1615,6 @@ fn make(step: *Step) !void {
try zig_args.append(bin_name);
try zig_args.append("--test-cmd");
try zig_args.append("--dir=.");
- try zig_args.append("--test-cmd");
- try zig_args.append("--allow-unknown-exports"); // TODO: Remove when stage2 is default compiler
try zig_args.append("--test-cmd-bin");
} else {
try zig_args.append("--test-no-exec");
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index eb1212607d..c08af8ec44 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -51,7 +51,7 @@ pub const StackTrace = struct {
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
};
- const tty_config = std.debug.detectTTYConfig();
+ const tty_config = std.debug.detectTTYConfig(std.io.getStdErr());
try writer.writeAll("\n");
std.debug.writeStackTrace(self, writer, arena.allocator(), debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
@@ -220,15 +220,13 @@ pub const Type = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const Int = struct {
signedness: Signedness,
- /// TODO make this u16 instead of comptime_int
- bits: comptime_int,
+ bits: u16,
};
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Float = struct {
- /// TODO make this u16 instead of comptime_int
- bits: comptime_int,
+ bits: u16,
};
/// This data structure is used by the Zig language code generation and
@@ -282,8 +280,7 @@ pub const Type = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const StructField = struct {
name: []const u8,
- /// TODO rename to `type`
- field_type: type,
+ type: type,
default_value: ?*const anyopaque,
is_comptime: bool,
alignment: comptime_int,
@@ -333,8 +330,6 @@ pub const Type = union(enum) {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Enum = struct {
- /// TODO enums should no longer have this field in type info.
- layout: ContainerLayout,
tag_type: type,
fields: []const EnumField,
decls: []const Declaration,
@@ -345,7 +340,7 @@ pub const Type = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const UnionField = struct {
name: []const u8,
- field_type: type,
+ type: type,
alignment: comptime_int,
};
@@ -369,14 +364,14 @@ pub const Type = union(enum) {
is_var_args: bool,
/// TODO change the language spec to make this not optional.
return_type: ?type,
- args: []const Param,
+ params: []const Param,
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Param = struct {
is_generic: bool,
is_noalias: bool,
- arg_type: ?type,
+ type: ?type,
};
};
@@ -627,6 +622,87 @@ pub const CallModifier = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
+pub const VaListAarch64 = extern struct {
+ __stack: *anyopaque,
+ __gr_top: *anyopaque,
+ __vr_top: *anyopaque,
+ __gr_offs: c_int,
+ __vr_offs: c_int,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListHexagon = extern struct {
+ __gpr: c_long,
+ __fpr: c_long,
+ __overflow_arg_area: *anyopaque,
+ __reg_save_area: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListPowerPc = extern struct {
+ gpr: u8,
+ fpr: u8,
+ reserved: c_ushort,
+ overflow_arg_area: *anyopaque,
+ reg_save_area: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListS390x = extern struct {
+ __current_saved_reg_area_pointer: *anyopaque,
+ __saved_reg_area_end_pointer: *anyopaque,
+ __overflow_area_pointer: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListX86_64 = extern struct {
+ gp_offset: c_uint,
+ fp_offset: c_uint,
+ overflow_arg_area: *anyopaque,
+ reg_save_area: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaList = switch (builtin.cpu.arch) {
+ .aarch64 => switch (builtin.os.tag) {
+ .windows => *u8,
+ .ios, .macos, .tvos, .watchos => *u8,
+ else => @compileError("disabled due to miscompilations"), // VaListAarch64,
+ },
+ .arm => switch (builtin.os.tag) {
+ .ios, .macos, .tvos, .watchos => *u8,
+ else => *anyopaque,
+ },
+ .amdgcn => *u8,
+ .avr => *anyopaque,
+ .bpfel, .bpfeb => *anyopaque,
+ .hexagon => if (builtin.target.isMusl()) VaListHexagon else *u8,
+ .mips, .mipsel, .mips64, .mips64el => *anyopaque,
+ .riscv32, .riscv64 => *anyopaque,
+ .powerpc, .powerpcle => switch (builtin.os.tag) {
+ .ios, .macos, .tvos, .watchos, .aix => *u8,
+ else => VaListPowerPc,
+ },
+ .powerpc64, .powerpc64le => *u8,
+ .sparc, .sparcel, .sparc64 => *anyopaque,
+ .spirv32, .spirv64 => *anyopaque,
+ .s390x => VaListS390x,
+ .wasm32, .wasm64 => *anyopaque,
+ .x86 => *u8,
+ .x86_64 => switch (builtin.os.tag) {
+ .windows => @compileError("disabled due to miscompilations"), // *u8,
+ else => VaListX86_64,
+ },
+ else => @compileError("VaList not supported for this target yet"),
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
pub const PrefetchOptions = struct {
/// Whether the prefetch should prepare for a read or a write.
rw: Rw = .read,
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 5f03f1c619..212b8e2d4d 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -206,7 +206,7 @@ pub extern "c" fn sendto(
dest_addr: ?*const c.sockaddr,
addrlen: c.socklen_t,
) isize;
-pub extern "c" fn sendmsg(sockfd: c.fd_t, msg: *const std.x.os.Socket.Message, flags: c_int) isize;
+pub extern "c" fn sendmsg(sockfd: c.fd_t, msg: *const c.msghdr_const, flags: u32) isize;
pub extern "c" fn recv(sockfd: c.fd_t, arg1: ?*anyopaque, arg2: usize, arg3: c_int) isize;
pub extern "c" fn recvfrom(
@@ -217,7 +217,7 @@ pub extern "c" fn recvfrom(
noalias src_addr: ?*c.sockaddr,
noalias addrlen: ?*c.socklen_t,
) isize;
-pub extern "c" fn recvmsg(sockfd: c.fd_t, msg: *std.x.os.Socket.Message, flags: c_int) isize;
+pub extern "c" fn recvmsg(sockfd: c.fd_t, msg: *c.msghdr, flags: u32) isize;
pub extern "c" fn kill(pid: c.pid_t, sig: c_int) c_int;
pub extern "c" fn getdirentries(fd: c.fd_t, buf_ptr: [*]u8, nbytes: usize, basep: *i64) isize;
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index b68f04379f..9c5ac1e93a 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -1007,7 +1007,16 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 128;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ len: u8 align(8),
+ family: sa_family_t,
+ padding: [126]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
pub const in = extern struct {
len: u8 = @sizeOf(in),
family: sa_family_t = AF.INET,
diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig
index 2410310fc7..b632211307 100644
--- a/lib/std/c/dragonfly.zig
+++ b/lib/std/c/dragonfly.zig
@@ -1,5 +1,6 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
+const assert = std.debug.assert;
const maxInt = std.math.maxInt;
const iovec = std.os.iovec;
@@ -12,6 +13,7 @@ pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize;
pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int;
+pub extern "c" fn arc4random_buf(buf: [*]u8, len: usize) void;
pub const dl_iterate_phdr_callback = *const fn (info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int;
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*anyopaque) c_int;
@@ -419,6 +421,7 @@ pub const F = struct {
pub const DUP2FD = 10;
pub const DUPFD_CLOEXEC = 17;
pub const DUP2FD_CLOEXEC = 18;
+ pub const GETPATH = 19;
};
pub const FD_CLOEXEC = 1;
@@ -476,11 +479,20 @@ pub const CLOCK = struct {
pub const sockaddr = extern struct {
len: u8,
- family: u8,
+ family: sa_family_t,
data: [14]u8,
pub const SS_MAXSIZE = 128;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ len: u8 align(8),
+ family: sa_family_t,
+ padding: [126]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
pub const in = extern struct {
len: u8 = @sizeOf(in),
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index c4bd4a44a7..7a4e30b909 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -1,4 +1,5 @@
const std = @import("../std.zig");
+const assert = std.debug.assert;
const builtin = @import("builtin");
const maxInt = std.math.maxInt;
const iovec = std.os.iovec;
@@ -15,12 +16,15 @@ pub extern "c" fn pthread_getthreadid_np() c_int;
pub extern "c" fn pthread_set_name_np(thread: std.c.pthread_t, name: [*:0]const u8) void;
pub extern "c" fn pthread_get_name_np(thread: std.c.pthread_t, name: [*:0]u8, len: usize) void;
pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int;
+pub extern "c" fn arc4random_buf(buf: [*]u8, len: usize) void;
pub extern "c" fn posix_memalign(memptr: *?*anyopaque, alignment: usize, size: usize) c_int;
pub extern "c" fn malloc_usable_size(?*const anyopaque) usize;
pub extern "c" fn getpid() pid_t;
+pub extern "c" fn kinfo_getfile(pid: pid_t, cntp: *c_int) ?[*]kinfo_file;
+
pub const sf_hdtr = extern struct {
headers: [*]const iovec_const,
hdr_cnt: c_int,
@@ -401,7 +405,16 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 128;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ len: u8 align(8),
+ family: sa_family_t,
+ padding: [126]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
pub const in = extern struct {
len: u8 = @sizeOf(in),
diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig
index 86b9f25902..9c4f8460de 100644
--- a/lib/std/c/haiku.zig
+++ b/lib/std/c/haiku.zig
@@ -1,4 +1,5 @@
const std = @import("../std.zig");
+const assert = std.debug.assert;
const builtin = @import("builtin");
const maxInt = std.math.maxInt;
const iovec = std.os.iovec;
@@ -339,7 +340,16 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 128;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ len: u8 align(8),
+ family: sa_family_t,
+ padding: [126]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
pub const in = extern struct {
len: u8 = @sizeOf(in),
diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig
index d72fd15a57..470c5e8437 100644
--- a/lib/std/c/linux.zig
+++ b/lib/std/c/linux.zig
@@ -320,9 +320,7 @@ pub const pthread_rwlock_t = switch (native_abi) {
size: [56]u8 align(@alignOf(usize)) = [_]u8{0} ** 56,
},
};
-pub usingnamespace if (native_abi == .android) struct {
- pub const pthread_key_t = c_int;
-} else struct {};
+pub const pthread_key_t = c_uint;
pub const sem_t = extern struct {
__size: [__SIZEOF_SEM_T]u8 align(@alignOf(usize)),
};
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index d9bf925c17..b963b2e2b1 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -1,4 +1,5 @@
const std = @import("../std.zig");
+const assert = std.debug.assert;
const builtin = @import("builtin");
const maxInt = std.math.maxInt;
const iovec = std.os.iovec;
@@ -481,7 +482,16 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 128;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ len: u8 align(8),
+ family: sa_family_t,
+ padding: [126]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
pub const in = extern struct {
len: u8 = @sizeOf(in),
@@ -537,6 +547,7 @@ pub const KERN = struct {
};
pub const PATH_MAX = 1024;
+pub const NAME_MAX = 255;
pub const IOV_MAX = KERN.IOV_MAX;
pub const STDIN_FILENO = 0;
@@ -689,13 +700,17 @@ pub const F = struct {
pub const SETFD = 2;
pub const GETFL = 3;
pub const SETFL = 4;
-
pub const GETOWN = 5;
pub const SETOWN = 6;
-
pub const GETLK = 7;
pub const SETLK = 8;
pub const SETLKW = 9;
+ pub const CLOSEM = 10;
+ pub const MAXFD = 11;
+ pub const DUPFD_CLOEXEC = 12;
+ pub const GETNOSIGPIPE = 13;
+ pub const SETNOSIGPIPE = 14;
+ pub const GETPATH = 15;
pub const RDLCK = 1;
pub const WRLCK = 3;
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index 83aed68483..51c4bcb6dd 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -1,4 +1,5 @@
const std = @import("../std.zig");
+const assert = std.debug.assert;
const maxInt = std.math.maxInt;
const builtin = @import("builtin");
const iovec = std.os.iovec;
@@ -372,7 +373,16 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 256;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ len: u8 align(8),
+ family: sa_family_t,
+ padding: [254]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
pub const in = extern struct {
len: u8 = @sizeOf(in),
@@ -417,6 +427,7 @@ pub const AI = struct {
};
pub const PATH_MAX = 1024;
+pub const NAME_MAX = 255;
pub const IOV_MAX = 1024;
pub const STDIN_FILENO = 0;
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index cbeeb5fb42..fe60c426e5 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -1,4 +1,5 @@
const std = @import("../std.zig");
+const assert = std.debug.assert;
const builtin = @import("builtin");
const maxInt = std.math.maxInt;
const iovec = std.os.iovec;
@@ -435,7 +436,15 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 256;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ family: sa_family_t align(8),
+ padding: [254]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
pub const in = extern struct {
family: sa_family_t = AF.INET,
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index b706f0aecb..4a816c8318 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -946,75 +946,112 @@ pub const ChildProcess = struct {
defer if (maybe_envp_buf) |envp_buf| self.allocator.free(envp_buf);
const envp_ptr = if (maybe_envp_buf) |envp_buf| envp_buf.ptr else null;
+ const app_name_utf8 = self.argv[0];
+ const app_name_is_absolute = fs.path.isAbsolute(app_name_utf8);
+
// the cwd set in ChildProcess is in effect when choosing the executable path
// to match posix semantics
- const app_path = x: {
- if (self.cwd) |cwd| {
- const resolved = try fs.path.resolve(self.allocator, &[_][]const u8{ cwd, self.argv[0] });
- defer self.allocator.free(resolved);
- break :x try cstr.addNullByte(self.allocator, resolved);
+ var cwd_path_w_needs_free = false;
+ const cwd_path_w = x: {
+ // If the app name is absolute, then we need to use its dirname as the cwd
+ if (app_name_is_absolute) {
+ cwd_path_w_needs_free = true;
+ const dir = fs.path.dirname(app_name_utf8).?;
+ break :x try unicode.utf8ToUtf16LeWithNull(self.allocator, dir);
+ } else if (self.cwd) |cwd| {
+ cwd_path_w_needs_free = true;
+ break :x try unicode.utf8ToUtf16LeWithNull(self.allocator, cwd);
} else {
- break :x try cstr.addNullByte(self.allocator, self.argv[0]);
+ break :x &[_:0]u16{}; // empty for cwd
+ }
+ };
+ defer if (cwd_path_w_needs_free) self.allocator.free(cwd_path_w);
+
+ // If the app name has more than just a filename, then we need to separate that
+ // into the basename and dirname and use the dirname as an addition to the cwd
+ // path. This is because NtQueryDirectoryFile cannot accept FileName params with
+ // path separators.
+ const app_basename_utf8 = fs.path.basename(app_name_utf8);
+ // If the app name is absolute, then the cwd will already have the app's dirname in it,
+ // so only populate app_dirname if app name is a relative path with > 0 path separators.
+ const maybe_app_dirname_utf8 = if (!app_name_is_absolute) fs.path.dirname(app_name_utf8) else null;
+ const app_dirname_w: ?[:0]u16 = x: {
+ if (maybe_app_dirname_utf8) |app_dirname_utf8| {
+ break :x try unicode.utf8ToUtf16LeWithNull(self.allocator, app_dirname_utf8);
}
+ break :x null;
};
- defer self.allocator.free(app_path);
+ defer if (app_dirname_w != null) self.allocator.free(app_dirname_w.?);
- const app_path_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, app_path);
- defer self.allocator.free(app_path_w);
+ const app_name_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, app_basename_utf8);
+ defer self.allocator.free(app_name_w);
const cmd_line_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, cmd_line);
defer self.allocator.free(cmd_line_w);
- windowsCreateProcess(app_path_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
- if (no_path_err != error.FileNotFound) return no_path_err;
+ exec: {
+ const PATH: [:0]const u16 = std.os.getenvW(unicode.utf8ToUtf16LeStringLiteral("PATH")) orelse &[_:0]u16{};
+ const PATHEXT: [:0]const u16 = std.os.getenvW(unicode.utf8ToUtf16LeStringLiteral("PATHEXT")) orelse &[_:0]u16{};
- var free_path = true;
- const PATH = process.getEnvVarOwned(self.allocator, "PATH") catch |err| switch (err) {
- error.EnvironmentVariableNotFound => blk: {
- free_path = false;
- break :blk "";
- },
- else => |e| return e,
- };
- defer if (free_path) self.allocator.free(PATH);
+ var app_buf = std.ArrayListUnmanaged(u16){};
+ defer app_buf.deinit(self.allocator);
- var free_path_ext = true;
- const PATHEXT = process.getEnvVarOwned(self.allocator, "PATHEXT") catch |err| switch (err) {
- error.EnvironmentVariableNotFound => blk: {
- free_path_ext = false;
- break :blk "";
- },
- else => |e| return e,
- };
- defer if (free_path_ext) self.allocator.free(PATHEXT);
+ try app_buf.appendSlice(self.allocator, app_name_w);
- const app_name = self.argv[0];
+ var dir_buf = std.ArrayListUnmanaged(u16){};
+ defer dir_buf.deinit(self.allocator);
- var it = mem.tokenize(u8, PATH, ";");
- retry: while (it.next()) |search_path| {
- const path_no_ext = try fs.path.join(self.allocator, &[_][]const u8{ search_path, app_name });
- defer self.allocator.free(path_no_ext);
-
- var ext_it = mem.tokenize(u8, PATHEXT, ";");
- while (ext_it.next()) |app_ext| {
- const joined_path = try mem.concat(self.allocator, u8, &[_][]const u8{ path_no_ext, app_ext });
- defer self.allocator.free(joined_path);
+ if (cwd_path_w.len > 0) {
+ try dir_buf.appendSlice(self.allocator, cwd_path_w);
+ }
+ if (app_dirname_w) |app_dir| {
+ if (dir_buf.items.len > 0) try dir_buf.append(self.allocator, fs.path.sep);
+ try dir_buf.appendSlice(self.allocator, app_dir);
+ }
+ if (dir_buf.items.len > 0) {
+ // Need to normalize the path, openDirW can't handle things like double backslashes
+ const normalized_len = windows.normalizePath(u16, dir_buf.items) catch return error.BadPathName;
+ dir_buf.shrinkRetainingCapacity(normalized_len);
+ }
- const joined_path_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, joined_path);
- defer self.allocator.free(joined_path_w);
+ windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
+ var original_err = switch (no_path_err) {
+ error.FileNotFound, error.InvalidExe, error.AccessDenied => |e| e,
+ error.UnrecoverableInvalidExe => return error.InvalidExe,
+ else => |e| return e,
+ };
+
+ // If the app name had path separators, that disallows PATH searching,
+ // and there's no need to search the PATH if the app name is absolute.
+ // We still search the path if the cwd is absolute because of the
+ // "cwd set in ChildProcess is in effect when choosing the executable path
+ // to match posix semantics" behavior--we don't want to skip searching
+ // the PATH just because we were trying to set the cwd of the child process.
+ if (app_dirname_w != null or app_name_is_absolute) {
+ return original_err;
+ }
- if (windowsCreateProcess(joined_path_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo)) |_| {
- break :retry;
+ var it = mem.tokenize(u16, PATH, &[_]u16{';'});
+ while (it.next()) |search_path| {
+ dir_buf.clearRetainingCapacity();
+ try dir_buf.appendSlice(self.allocator, search_path);
+ // Need to normalize the path, some PATH values can contain things like double
+ // backslashes which openDirW can't handle
+ const normalized_len = windows.normalizePath(u16, dir_buf.items) catch continue;
+ dir_buf.shrinkRetainingCapacity(normalized_len);
+
+ if (windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo)) {
+ break :exec;
} else |err| switch (err) {
- error.FileNotFound => continue,
- error.AccessDenied => continue,
- else => return err,
+ error.FileNotFound, error.AccessDenied, error.InvalidExe => continue,
+ error.UnrecoverableInvalidExe => return error.InvalidExe,
+ else => |e| return e,
}
+ } else {
+ return original_err;
}
- } else {
- return no_path_err; // return the original error
- }
- };
+ };
+ }
if (g_hChildStd_IN_Wr) |h| {
self.stdin = File{ .handle = h };
@@ -1057,6 +1094,235 @@ pub const ChildProcess = struct {
}
};
+/// Expects `app_buf` to contain exactly the app name, and `dir_buf` to contain exactly the dir path.
+/// After return, `app_buf` will always contain exactly the app name and `dir_buf` will always contain exactly the dir path.
+/// Note: `app_buf` should not contain any leading path separators.
+/// Note: If the dir is the cwd, dir_buf should be empty (len = 0).
+fn windowsCreateProcessPathExt(
+ allocator: mem.Allocator,
+ dir_buf: *std.ArrayListUnmanaged(u16),
+ app_buf: *std.ArrayListUnmanaged(u16),
+ pathext: [:0]const u16,
+ cmd_line: [*:0]u16,
+ envp_ptr: ?[*]u16,
+ cwd_ptr: ?[*:0]u16,
+ lpStartupInfo: *windows.STARTUPINFOW,
+ lpProcessInformation: *windows.PROCESS_INFORMATION,
+) !void {
+ const app_name_len = app_buf.items.len;
+ const dir_path_len = dir_buf.items.len;
+
+ if (app_name_len == 0) return error.FileNotFound;
+
+ defer app_buf.shrinkRetainingCapacity(app_name_len);
+ defer dir_buf.shrinkRetainingCapacity(dir_path_len);
+
+ // The name of the game here is to avoid CreateProcessW calls at all costs,
+ // and only ever try calling it when we have a real candidate for execution.
+ // Secondarily, we want to minimize the number of syscalls used when checking
+ // for each PATHEXT-appended version of the app name.
+ //
+ // An overview of the technique used:
+ // - Open the search directory for iteration (either cwd or a path from PATH)
+ // - Use NtQueryDirectoryFile with a wildcard filename of `<app name>*` to
+ // check if anything that could possibly match either the unappended version
+ // of the app name or any of the versions with a PATHEXT value appended exists.
+ // - If the wildcard NtQueryDirectoryFile call found nothing, we can exit early
+ // without needing to use PATHEXT at all.
+ //
+ // This allows us to use a <open dir, NtQueryDirectoryFile, close dir> sequence
+ // for any directory that doesn't contain any possible matches, instead of having
+ // to use a separate look up for each individual filename combination (unappended +
+ // each PATHEXT appended). For directories where the wildcard *does* match something,
+ // we only need to do a maximum of <number of supported PATHEXT extensions> more
+ // NtQueryDirectoryFile calls.
+
+ var dir = dir: {
+ if (fs.path.isAbsoluteWindowsWTF16(dir_buf.items[0..dir_path_len])) {
+ const prefixed_path = try windows.wToPrefixedFileW(dir_buf.items[0..dir_path_len]);
+ break :dir fs.cwd().openDirW(prefixed_path.span().ptr, .{}, true) catch return error.FileNotFound;
+ }
+ // needs to be null-terminated
+ try dir_buf.append(allocator, 0);
+ defer dir_buf.shrinkRetainingCapacity(dir_buf.items[0..dir_path_len].len);
+ const dir_path_z = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
+ break :dir std.fs.cwd().openDirW(dir_path_z.ptr, .{}, true) catch return error.FileNotFound;
+ };
+ defer dir.close();
+
+ // Add wildcard and null-terminator
+ try app_buf.append(allocator, '*');
+ try app_buf.append(allocator, 0);
+ const app_name_wildcard = app_buf.items[0 .. app_buf.items.len - 1 :0];
+
+ // Enough for the FILE_DIRECTORY_INFORMATION + (NAME_MAX UTF-16 code units [2 bytes each]).
+ const file_info_buf_size = @sizeOf(windows.FILE_DIRECTORY_INFORMATION) + (windows.NAME_MAX * 2);
+ var file_information_buf: [file_info_buf_size]u8 align(@alignOf(os.windows.FILE_DIRECTORY_INFORMATION)) = undefined;
+ var io_status: windows.IO_STATUS_BLOCK = undefined;
+ const found_name: ?[]const u16 = found_name: {
+ const app_name_len_bytes = math.cast(u16, app_name_wildcard.len * 2) orelse return error.NameTooLong;
+ var app_name_unicode_string = windows.UNICODE_STRING{
+ .Length = app_name_len_bytes,
+ .MaximumLength = app_name_len_bytes,
+ .Buffer = @intToPtr([*]u16, @ptrToInt(app_name_wildcard.ptr)),
+ };
+ const rc = windows.ntdll.NtQueryDirectoryFile(
+ dir.fd,
+ null,
+ null,
+ null,
+ &io_status,
+ &file_information_buf,
+ file_information_buf.len,
+ .FileDirectoryInformation,
+ // TODO: It might be better to iterate over all wildcard matches and
+ // only pick the ones that match an appended PATHEXT instead of only
+ // using the wildcard as a lookup and then restarting iteration
+ // on future NtQueryDirectoryFile calls.
+ //
+ // However, note that this could lead to worse outcomes in the
+ // case of a very generic command name (e.g. "a"), so it might
+ // be better to only use the wildcard to determine if it's worth
+ // checking with PATHEXT (this is the current behavior).
+ windows.TRUE, // single result
+ &app_name_unicode_string,
+ windows.TRUE, // restart iteration
+ );
+
+ // If we get nothing with the wildcard, then we can just bail out
+ // as we know appending PATHEXT will not yield anything.
+ switch (rc) {
+ .SUCCESS => {},
+ .NO_SUCH_FILE => return error.FileNotFound,
+ .NO_MORE_FILES => return error.FileNotFound,
+ .ACCESS_DENIED => return error.AccessDenied,
+ else => return windows.unexpectedStatus(rc),
+ }
+
+ const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf);
+ if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) {
+ break :found_name null;
+ }
+ break :found_name @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2];
+ };
+
+ const unappended_err = unappended: {
+ // NtQueryDirectoryFile returns results in order by filename, so the first result of
+ // the wildcard call will always be the unappended version if it exists. So, if found_name
+ // is not the unappended version, we can skip straight to trying versions with PATHEXT appended.
+ // TODO: This might depend on the filesystem, though; need to somehow verify that it always
+ // works this way.
+ if (found_name != null and windows.eqlIgnoreCaseWTF16(found_name.?, app_buf.items[0..app_name_len])) {
+ if (dir_path_len != 0) switch (dir_buf.items[dir_buf.items.len - 1]) {
+ '/', '\\' => {},
+ else => try dir_buf.append(allocator, fs.path.sep),
+ };
+ try dir_buf.appendSlice(allocator, app_buf.items[0..app_name_len]);
+ try dir_buf.append(allocator, 0);
+ const full_app_name = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
+
+ if (windowsCreateProcess(full_app_name.ptr, cmd_line, envp_ptr, cwd_ptr, lpStartupInfo, lpProcessInformation)) |_| {
+ return;
+ } else |err| switch (err) {
+ error.FileNotFound,
+ error.AccessDenied,
+ => break :unappended err,
+ error.InvalidExe => {
+ // On InvalidExe, if the extension of the app name is .exe then
+ // it's treated as an unrecoverable error. Otherwise, it'll be
+ // skipped as normal.
+ const app_name = app_buf.items[0..app_name_len];
+ const ext_start = std.mem.lastIndexOfScalar(u16, app_name, '.') orelse break :unappended err;
+ const ext = app_name[ext_start..];
+ if (windows.eqlIgnoreCaseWTF16(ext, unicode.utf8ToUtf16LeStringLiteral(".EXE"))) {
+ return error.UnrecoverableInvalidExe;
+ }
+ break :unappended err;
+ },
+ else => return err,
+ }
+ }
+ break :unappended error.FileNotFound;
+ };
+
+ // Now we know that at least *a* file matching the wildcard exists, we can loop
+ // through PATHEXT in order and exec any that exist
+
+ var ext_it = mem.tokenize(u16, pathext, &[_]u16{';'});
+ while (ext_it.next()) |ext| {
+ if (!windowsCreateProcessSupportsExtension(ext)) continue;
+
+ app_buf.shrinkRetainingCapacity(app_name_len);
+ try app_buf.appendSlice(allocator, ext);
+ try app_buf.append(allocator, 0);
+ const app_name_appended = app_buf.items[0 .. app_buf.items.len - 1 :0];
+
+ const app_name_len_bytes = math.cast(u16, app_name_appended.len * 2) orelse return error.NameTooLong;
+ var app_name_unicode_string = windows.UNICODE_STRING{
+ .Length = app_name_len_bytes,
+ .MaximumLength = app_name_len_bytes,
+ .Buffer = @intToPtr([*]u16, @ptrToInt(app_name_appended.ptr)),
+ };
+
+ // Re-use the directory handle but this time we call with the appended app name
+ // with no wildcard.
+ const rc = windows.ntdll.NtQueryDirectoryFile(
+ dir.fd,
+ null,
+ null,
+ null,
+ &io_status,
+ &file_information_buf,
+ file_information_buf.len,
+ .FileDirectoryInformation,
+ windows.TRUE, // single result
+ &app_name_unicode_string,
+ windows.TRUE, // restart iteration
+ );
+
+ switch (rc) {
+ .SUCCESS => {},
+ .NO_SUCH_FILE => continue,
+ .NO_MORE_FILES => continue,
+ .ACCESS_DENIED => continue,
+ else => return windows.unexpectedStatus(rc),
+ }
+
+ const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf);
+ // Skip directories
+ if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) continue;
+
+ dir_buf.shrinkRetainingCapacity(dir_path_len);
+ if (dir_path_len != 0) switch (dir_buf.items[dir_buf.items.len - 1]) {
+ '/', '\\' => {},
+ else => try dir_buf.append(allocator, fs.path.sep),
+ };
+ try dir_buf.appendSlice(allocator, app_buf.items[0..app_name_len]);
+ try dir_buf.appendSlice(allocator, ext);
+ try dir_buf.append(allocator, 0);
+ const full_app_name = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
+
+ if (windowsCreateProcess(full_app_name.ptr, cmd_line, envp_ptr, cwd_ptr, lpStartupInfo, lpProcessInformation)) |_| {
+ return;
+ } else |err| switch (err) {
+ error.FileNotFound => continue,
+ error.AccessDenied => continue,
+ error.InvalidExe => {
+ // On InvalidExe, if the extension of the app name is .exe then
+ // it's treated as an unrecoverable error. Otherwise, it'll be
+ // skipped as normal.
+ if (windows.eqlIgnoreCaseWTF16(ext, unicode.utf8ToUtf16LeStringLiteral(".EXE"))) {
+ return error.UnrecoverableInvalidExe;
+ }
+ continue;
+ },
+ else => return err,
+ }
+ }
+
+ return unappended_err;
+}
+
fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u16, cwd_ptr: ?[*:0]u16, lpStartupInfo: *windows.STARTUPINFOW, lpProcessInformation: *windows.PROCESS_INFORMATION) !void {
// TODO the docs for environment pointer say:
// > A pointer to the environment block for the new process. If this parameter
@@ -1089,6 +1355,70 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1
);
}
+/// Case-insensitive UTF-16 lookup
+fn windowsCreateProcessSupportsExtension(ext: []const u16) bool {
+ if (ext.len != 4) return false;
+ const State = enum {
+ start,
+ dot,
+ b,
+ ba,
+ c,
+ cm,
+ co,
+ e,
+ ex,
+ };
+ var state: State = .start;
+ for (ext) |c| switch (state) {
+ .start => switch (c) {
+ '.' => state = .dot,
+ else => return false,
+ },
+ .dot => switch (c) {
+ 'b', 'B' => state = .b,
+ 'c', 'C' => state = .c,
+ 'e', 'E' => state = .e,
+ else => return false,
+ },
+ .b => switch (c) {
+ 'a', 'A' => state = .ba,
+ else => return false,
+ },
+ .c => switch (c) {
+ 'm', 'M' => state = .cm,
+ 'o', 'O' => state = .co,
+ else => return false,
+ },
+ .e => switch (c) {
+ 'x', 'X' => state = .ex,
+ else => return false,
+ },
+ .ba => switch (c) {
+ 't', 'T' => return true, // .BAT
+ else => return false,
+ },
+ .cm => switch (c) {
+ 'd', 'D' => return true, // .CMD
+ else => return false,
+ },
+ .co => switch (c) {
+ 'm', 'M' => return true, // .COM
+ else => return false,
+ },
+ .ex => switch (c) {
+ 'e', 'E' => return true, // .EXE
+ else => return false,
+ },
+ };
+ return false;
+}
+
+test "windowsCreateProcessSupportsExtension" {
+ try std.testing.expect(windowsCreateProcessSupportsExtension(&[_]u16{ '.', 'e', 'X', 'e' }));
+ try std.testing.expect(!windowsCreateProcessSupportsExtension(&[_]u16{ '.', 'e', 'X', 'e', 'c' }));
+}
+
/// Caller must dealloc.
fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8) ![:0]u8 {
var buf = std.ArrayList(u8).init(allocator);
diff --git a/lib/std/compress/deflate/bits_utils.zig b/lib/std/compress/deflate/bits_utils.zig
index 1620a8e380..85bae95bc8 100644
--- a/lib/std/compress/deflate/bits_utils.zig
+++ b/lib/std/compress/deflate/bits_utils.zig
@@ -8,7 +8,6 @@ pub fn bitReverse(comptime T: type, value: T, N: usize) T {
test "bitReverse" {
const std = @import("std");
- const expect = std.testing.expect;
const ReverseBitsTest = struct {
in: u16,
@@ -29,6 +28,6 @@ test "bitReverse" {
for (reverse_bits_tests) |h| {
var v = bitReverse(u16, h.in, h.bit_count);
- try expect(v == h.out);
+ try std.testing.expectEqual(h.out, v);
}
}
diff --git a/lib/std/compress/deflate/compressor.zig b/lib/std/compress/deflate/compressor.zig
index 653c270f54..45c5c6bf8e 100644
--- a/lib/std/compress/deflate/compressor.zig
+++ b/lib/std/compress/deflate/compressor.zig
@@ -1079,7 +1079,7 @@ test "deflate" {
try comp.close();
comp.deinit();
- try expect(mem.eql(u8, output.items, dt.out));
+ try testing.expectEqualSlices(u8, dt.out, output.items);
}
}
@@ -1104,7 +1104,7 @@ test "bulkHash4" {
_ = bulkHash4(y, dst);
for (dst) |got, i| {
var want = hash4(y[i..]);
- try expect(got == want);
+ try testing.expectEqual(want, got);
}
}
}
diff --git a/lib/std/compress/deflate/compressor_test.zig b/lib/std/compress/deflate/compressor_test.zig
index 51f459cd65..c51f68236d 100644
--- a/lib/std/compress/deflate/compressor_test.zig
+++ b/lib/std/compress/deflate/compressor_test.zig
@@ -62,8 +62,8 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
defer testing.allocator.free(decompressed);
var read = try decomp.reader().readAll(decompressed); // read at least half
- try expect(read == half_len);
- try expect(mem.eql(u8, input[0..half], decompressed));
+ try testing.expectEqual(half_len, read);
+ try testing.expectEqualSlices(u8, input[0..half], decompressed);
}
// Write last half of the input and close()
@@ -79,13 +79,13 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
defer testing.allocator.free(decompressed);
var read = try decomp.reader().readAll(decompressed);
- try expect(read == half_len);
- try expect(mem.eql(u8, input[half..], decompressed));
+ try testing.expectEqual(half_len, read);
+ try testing.expectEqualSlices(u8, input[half..], decompressed);
// Extra read
var final: [10]u8 = undefined;
read = try decomp.reader().readAll(&final);
- try expect(read == 0); // expect ended stream to return 0 bytes
+ try testing.expectEqual(@as(usize, 0), read); // expect ended stream to return 0 bytes
_ = decomp.close();
}
@@ -105,7 +105,7 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
_ = try decomp.reader().readAll(decompressed);
_ = decomp.close();
- try expect(mem.eql(u8, input, decompressed));
+ try testing.expectEqualSlices(u8, input, decompressed);
}
fn testToFromWithLevelAndLimit(level: deflate.Compression, input: []const u8, limit: u32) !void {
@@ -130,8 +130,8 @@ fn testToFromWithLevelAndLimit(level: deflate.Compression, input: []const u8, li
defer testing.allocator.free(decompressed);
var read: usize = try decomp.reader().readAll(decompressed);
- try expect(read == input.len);
- try expect(mem.eql(u8, input, decompressed));
+ try testing.expectEqual(input.len, read);
+ try testing.expectEqualSlices(u8, input, decompressed);
if (false) {
// TODO: this test has regressed
@@ -172,9 +172,7 @@ test "deflate/inflate" {
defer testing.allocator.free(large_data_chunk);
// fill with random data
for (large_data_chunk) |_, i| {
- var mul: u8 = @truncate(u8, i);
- _ = @mulWithOverflow(u8, mul, mul, &mul);
- large_data_chunk[i] = mul;
+ large_data_chunk[i] = @truncate(u8, i) *% @truncate(u8, i);
}
try testToFromWithLimit(large_data_chunk, limits);
}
@@ -237,7 +235,7 @@ test "very long sparse chunk" {
read = try reader.read(&buf);
written += try writer.write(buf[0..read]);
}
- try expect(written == 0x23e8);
+ try testing.expectEqual(@as(usize, 0x23e8), written);
}
test "compressor reset" {
@@ -287,7 +285,7 @@ fn testWriterReset(level: deflate.Compression, dict: ?[]const u8) !void {
try filler.writeData(&comp);
try comp.close();
- try expect(mem.eql(u8, buf1.items, buf2.items));
+ try testing.expectEqualSlices(u8, buf1.items, buf2.items);
}
test "decompressor dictionary" {
@@ -327,7 +325,7 @@ test "decompressor dictionary" {
defer decomp.deinit();
_ = try decomp.reader().readAll(decompressed);
- try expect(mem.eql(u8, decompressed, "hello again world"));
+ try testing.expectEqualSlices(u8, "hello again world", decompressed);
}
test "compressor dictionary" {
@@ -371,7 +369,7 @@ test "compressor dictionary" {
try comp_d.writer().writeAll(text);
try comp_d.close();
- try expect(mem.eql(u8, compressed_nd.readableSlice(0), compressed_d.items));
+ try testing.expectEqualSlices(u8, compressed_d.items, compressed_nd.readableSlice(0));
}
// Update the hash for best_speed only if d.index < d.maxInsertIndex
@@ -394,18 +392,12 @@ test "Go non-regression test for 2508" {
}
test "deflate/inflate string" {
- // Skip wasi because it does not support std.fs.openDirAbsolute()
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- const current_dir = try std.fs.openDirAbsolute(std.fs.path.dirname(@src().file).?, .{});
- const testdata_dir = try current_dir.openDir("testdata", .{});
-
const StringTest = struct {
filename: []const u8,
limit: [11]u32,
};
- var deflate_inflate_string_tests = [_]StringTest{
+ const deflate_inflate_string_tests = [_]StringTest{
.{
.filename = "compress-e.txt",
.limit = [11]u32{
@@ -440,12 +432,8 @@ test "deflate/inflate string" {
},
};
- for (deflate_inflate_string_tests) |t| {
- const golden_file = try testdata_dir.openFile(t.filename, .{});
- defer golden_file.close();
- var golden = try golden_file.reader().readAllAlloc(testing.allocator, math.maxInt(usize));
- defer testing.allocator.free(golden);
-
+ inline for (deflate_inflate_string_tests) |t| {
+ var golden = @embedFile("testdata/" ++ t.filename);
try testToFromWithLimit(golden, t.limit);
}
}
@@ -492,11 +480,8 @@ test "inflate reset" {
_ = decomp.close();
- try expect(strings[0].len == decompressed_0.len);
- try expect(strings[1].len == decompressed_1.len);
-
- try expect(mem.eql(u8, strings[0], decompressed_0));
- try expect(mem.eql(u8, strings[1], decompressed_1));
+ try testing.expectEqualSlices(u8, strings[0], decompressed_0);
+ try testing.expectEqualSlices(u8, strings[1], decompressed_1);
}
test "inflate reset dictionary" {
@@ -542,9 +527,6 @@ test "inflate reset dictionary" {
_ = decomp.close();
- try expect(strings[0].len == decompressed_0.len);
- try expect(strings[1].len == decompressed_1.len);
-
- try expect(mem.eql(u8, strings[0], decompressed_0));
- try expect(mem.eql(u8, strings[1], decompressed_1));
+ try testing.expectEqualSlices(u8, strings[0], decompressed_0);
+ try testing.expectEqualSlices(u8, strings[1], decompressed_1);
}
diff --git a/lib/std/compress/deflate/decompressor.zig b/lib/std/compress/deflate/decompressor.zig
index c8d8e07bbc..baef85cace 100644
--- a/lib/std/compress/deflate/decompressor.zig
+++ b/lib/std/compress/deflate/decompressor.zig
@@ -895,7 +895,6 @@ pub fn Decompressor(comptime ReaderType: type) type {
}
// tests
-const expect = std.testing.expect;
const expectError = std.testing.expectError;
const io = std.io;
const testing = std.testing;
@@ -928,7 +927,7 @@ test "truncated input" {
var output = [1]u8{0} ** 12;
try expectError(error.UnexpectedEndOfStream, zr.readAll(&output));
- try expect(mem.eql(u8, output[0..t.output.len], t.output));
+ try testing.expectEqualSlices(u8, t.output, output[0..t.output.len]);
}
}
@@ -1026,8 +1025,8 @@ test "inflate A Tale of Two Cities (1859) intro" {
var got: [700]u8 = undefined;
var got_len = try decomp.reader().read(&got);
- try expect(got_len == 616);
- try expect(mem.eql(u8, got[0..expected.len], expected));
+ try testing.expectEqual(@as(usize, 616), got_len);
+ try testing.expectEqualSlices(u8, expected, got[0..expected.len]);
}
test "lengths overflow" {
diff --git a/lib/std/compress/deflate/deflate_fast.zig b/lib/std/compress/deflate/deflate_fast.zig
index b85c5046e5..12d3e4203a 100644
--- a/lib/std/compress/deflate/deflate_fast.zig
+++ b/lib/std/compress/deflate/deflate_fast.zig
@@ -354,7 +354,7 @@ pub const DeflateFast = struct {
};
test "best speed match 1/3" {
- const expect = std.testing.expect;
+ const expectEqual = std.testing.expectEqual;
{
var previous = [_]u8{ 0, 0, 0, 1, 2 };
@@ -367,7 +367,7 @@ test "best speed match 1/3" {
};
var current = [_]u8{ 3, 4, 5, 0, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(3, -3, &current);
- try expect(got == 6);
+ try expectEqual(@as(i32, 6), got);
}
{
var previous = [_]u8{ 0, 0, 0, 1, 2 };
@@ -380,7 +380,7 @@ test "best speed match 1/3" {
};
var current = [_]u8{ 2, 4, 5, 0, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(3, -3, &current);
- try expect(got == 3);
+ try expectEqual(@as(i32, 3), got);
}
{
var previous = [_]u8{ 0, 0, 0, 1, 1 };
@@ -393,7 +393,7 @@ test "best speed match 1/3" {
};
var current = [_]u8{ 3, 4, 5, 0, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(3, -3, &current);
- try expect(got == 2);
+ try expectEqual(@as(i32, 2), got);
}
{
var previous = [_]u8{ 0, 0, 0, 1, 2 };
@@ -406,7 +406,7 @@ test "best speed match 1/3" {
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(0, -1, &current);
- try expect(got == 4);
+ try expectEqual(@as(i32, 4), got);
}
{
var previous = [_]u8{ 0, 0, 0, 1, 2, 3, 4, 5, 2, 2 };
@@ -419,7 +419,7 @@ test "best speed match 1/3" {
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(4, -7, &current);
- try expect(got == 5);
+ try expectEqual(@as(i32, 5), got);
}
{
var previous = [_]u8{ 9, 9, 9, 9, 9 };
@@ -432,7 +432,7 @@ test "best speed match 1/3" {
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(0, -1, &current);
- try expect(got == 0);
+ try expectEqual(@as(i32, 0), got);
}
{
var previous = [_]u8{ 9, 9, 9, 9, 9 };
@@ -445,12 +445,12 @@ test "best speed match 1/3" {
};
var current = [_]u8{ 9, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, 0, &current);
- try expect(got == 0);
+ try expectEqual(@as(i32, 0), got);
}
}
test "best speed match 2/3" {
- const expect = std.testing.expect;
+ const expectEqual = std.testing.expectEqual;
{
var previous = [_]u8{};
@@ -463,7 +463,7 @@ test "best speed match 2/3" {
};
var current = [_]u8{ 9, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, -5, &current);
- try expect(got == 0);
+ try expectEqual(@as(i32, 0), got);
}
{
var previous = [_]u8{};
@@ -476,7 +476,7 @@ test "best speed match 2/3" {
};
var current = [_]u8{ 9, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, -1, &current);
- try expect(got == 0);
+ try expectEqual(@as(i32, 0), got);
}
{
var previous = [_]u8{};
@@ -489,7 +489,7 @@ test "best speed match 2/3" {
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, 0, &current);
- try expect(got == 3);
+ try expectEqual(@as(i32, 3), got);
}
{
var previous = [_]u8{ 3, 4, 5 };
@@ -502,13 +502,13 @@ test "best speed match 2/3" {
};
var current = [_]u8{ 3, 4, 5 };
var got: i32 = e.matchLen(0, -3, &current);
- try expect(got == 3);
+ try expectEqual(@as(i32, 3), got);
}
}
test "best speed match 2/2" {
const testing = std.testing;
- const expect = testing.expect;
+ const expectEqual = testing.expectEqual;
const Case = struct {
previous: u32,
@@ -580,7 +580,7 @@ test "best speed match 2/2" {
.cur = 0,
};
var got: i32 = e.matchLen(c.s, c.t, current);
- try expect(got == c.expected);
+ try expectEqual(@as(i32, c.expected), got);
}
}
@@ -623,16 +623,16 @@ test "best speed shift offsets" {
tokens_count = 0;
enc.encode(&tokens, &tokens_count, &test_data);
var got = tokens_count;
- try expect(want_first_tokens == got);
+ try testing.expectEqual(want_first_tokens, got);
// Verify we are about to wrap.
- try expect(enc.cur == buffer_reset);
+ try testing.expectEqual(@as(i32, buffer_reset), enc.cur);
// Part 2 should match clean state as well even if wrapped.
tokens_count = 0;
enc.encode(&tokens, &tokens_count, &test_data);
got = tokens_count;
- try expect(want_second_tokens == got);
+ try testing.expectEqual(want_second_tokens, got);
// Verify that we wrapped.
try expect(enc.cur < buffer_reset);
@@ -645,13 +645,12 @@ test "best speed shift offsets" {
tokens_count = 0;
enc.encode(&tokens, &tokens_count, &test_data);
got = tokens_count;
- try expect(want_first_tokens == got);
+ try testing.expectEqual(want_first_tokens, got);
}
test "best speed reset" {
// test that encoding is consistent across a warparound of the table offset.
// See https://github.com/golang/go/issues/34121
- const expect = std.testing.expect;
const fmt = std.fmt;
const testing = std.testing;
@@ -716,6 +715,6 @@ test "best speed reset" {
try comp.close();
// output must match at wraparound
- try expect(mem.eql(u8, got.items, want.items));
+ try testing.expectEqualSlices(u8, want.items, got.items);
}
}
diff --git a/lib/std/compress/deflate/deflate_fast_test.zig b/lib/std/compress/deflate/deflate_fast_test.zig
index b3a255e598..9f7b639cba 100644
--- a/lib/std/compress/deflate/deflate_fast_test.zig
+++ b/lib/std/compress/deflate/deflate_fast_test.zig
@@ -85,8 +85,8 @@ test "best speed" {
var read = try decomp.reader().readAll(decompressed);
_ = decomp.close();
- try expect(read == want.items.len);
- try expect(mem.eql(u8, want.items, decompressed));
+ try testing.expectEqual(want.items.len, read);
+ try testing.expectEqualSlices(u8, want.items, decompressed);
}
}
}
@@ -152,8 +152,8 @@ test "best speed max match offset" {
var read = try decomp.reader().readAll(decompressed);
_ = decomp.close();
- try expect(read == src.len);
- try expect(mem.eql(u8, decompressed, src));
+ try testing.expectEqual(src.len, read);
+ try testing.expectEqualSlices(u8, src, decompressed);
}
}
}
diff --git a/lib/std/compress/deflate/dict_decoder.zig b/lib/std/compress/deflate/dict_decoder.zig
index 86215e572e..e2a185dc39 100644
--- a/lib/std/compress/deflate/dict_decoder.zig
+++ b/lib/std/compress/deflate/dict_decoder.zig
@@ -206,7 +206,6 @@ pub const DictDecoder = struct {
test "dictionary decoder" {
const ArrayList = std.ArrayList;
- const expect = std.testing.expect;
const testing = std.testing;
const abc = "ABC\n";
@@ -416,5 +415,5 @@ test "dictionary decoder" {
_ = try want.write(want_list.items[want_list.items.len - dd.histSize() ..][0..10]);
_ = try got.write(dd.readFlush());
- try expect(mem.eql(u8, got_list.items, want_list.items));
+ try testing.expectEqualSlices(u8, want_list.items, got_list.items);
}
diff --git a/lib/std/compress/deflate/huffman_bit_writer.zig b/lib/std/compress/deflate/huffman_bit_writer.zig
index c0b0502d15..fc5727ca63 100644
--- a/lib/std/compress/deflate/huffman_bit_writer.zig
+++ b/lib/std/compress/deflate/huffman_bit_writer.zig
@@ -121,7 +121,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self.nbytes = 0;
}
- fn write(self: *Self, b: []u8) Error!void {
+ fn write(self: *Self, b: []const u8) Error!void {
if (self.err) {
return;
}
@@ -155,7 +155,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
}
}
- pub fn writeBytes(self: *Self, bytes: []u8) Error!void {
+ pub fn writeBytes(self: *Self, bytes: []const u8) Error!void {
if (self.err) {
return;
}
@@ -323,7 +323,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
// storedSizeFits calculates the stored size, including header.
// The function returns the size in bits and whether the block
// fits inside a single block.
- fn storedSizeFits(in: ?[]u8) StoredSize {
+ fn storedSizeFits(in: ?[]const u8) StoredSize {
if (in == null) {
return .{ .size = 0, .storable = false };
}
@@ -453,7 +453,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self: *Self,
tokens: []const token.Token,
eof: bool,
- input: ?[]u8,
+ input: ?[]const u8,
) Error!void {
if (self.err) {
return;
@@ -546,7 +546,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self: *Self,
tokens: []const token.Token,
eof: bool,
- input: ?[]u8,
+ input: ?[]const u8,
) Error!void {
if (self.err) {
return;
@@ -685,7 +685,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
// Encodes a block of bytes as either Huffman encoded literals or uncompressed bytes
// if the results only gains very little from compression.
- pub fn writeBlockHuff(self: *Self, eof: bool, input: []u8) Error!void {
+ pub fn writeBlockHuff(self: *Self, eof: bool, input: []const u8) Error!void {
if (self.err) {
return;
}
@@ -828,7 +828,7 @@ pub fn huffmanBitWriter(allocator: Allocator, writer: anytype) !HuffmanBitWriter
// histogram accumulates a histogram of b in h.
//
// h.len must be >= 256, and h's elements must be all zeroes.
-fn histogram(b: []u8, h: *[]u16) void {
+fn histogram(b: []const u8, h: *[]u16) void {
var lh = h.*[0..256];
for (b) |t| {
lh[t] += 1;
@@ -848,11 +848,6 @@ test "writeBlockHuff" {
// Tests huffman encoding against reference files to detect possible regressions.
// If encoding/bit allocation changes you can regenerate these files
- if (builtin.os.tag == .windows) {
- // https://github.com/ziglang/zig/issues/13892
- return error.SkipZigTest;
- }
-
try testBlockHuff(
"huffman-null-max.input",
"huffman-null-max.golden",
@@ -891,21 +886,9 @@ test "writeBlockHuff" {
);
}
-fn testBlockHuff(in_name: []const u8, want_name: []const u8) !void {
- // Skip wasi because it does not support std.fs.openDirAbsolute()
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- const current_dir = try std.fs.openDirAbsolute(std.fs.path.dirname(@src().file).?, .{});
- const testdata_dir = try current_dir.openDir("testdata", .{});
- const in_file = try testdata_dir.openFile(in_name, .{});
- defer in_file.close();
- const want_file = try testdata_dir.openFile(want_name, .{});
- defer want_file.close();
-
- var in = try in_file.reader().readAllAlloc(testing.allocator, math.maxInt(usize));
- defer testing.allocator.free(in);
- var want = try want_file.reader().readAllAlloc(testing.allocator, math.maxInt(usize));
- defer testing.allocator.free(want);
+fn testBlockHuff(comptime in_name: []const u8, comptime want_name: []const u8) !void {
+ const in: []const u8 = @embedFile("testdata/" ++ in_name);
+ const want: []const u8 = @embedFile("testdata/" ++ want_name);
var buf = ArrayList(u8).init(testing.allocator);
defer buf.deinit();
@@ -914,7 +897,7 @@ fn testBlockHuff(in_name: []const u8, want_name: []const u8) !void {
try bw.writeBlockHuff(false, in);
try bw.flush();
- try expect(mem.eql(u8, buf.items, want));
+ try std.testing.expectEqualSlices(u8, want, buf.items);
// Test if the writer produces the same output after reset.
var buf_after_reset = ArrayList(u8).init(testing.allocator);
@@ -925,8 +908,8 @@ fn testBlockHuff(in_name: []const u8, want_name: []const u8) !void {
try bw.writeBlockHuff(false, in);
try bw.flush();
- try expect(mem.eql(u8, buf_after_reset.items, buf.items));
- try expect(mem.eql(u8, buf_after_reset.items, want));
+ try std.testing.expectEqualSlices(u8, buf.items, buf_after_reset.items);
+ try std.testing.expectEqualSlices(u8, want, buf_after_reset.items);
try testWriterEOF(.write_huffman_block, &[0]token.Token{}, in);
}
@@ -1617,38 +1600,18 @@ test "writeBlockDynamic" {
// testBlock tests a block against its references,
// or regenerate the references, if "-update" flag is set.
-fn testBlock(comptime ht: HuffTest, ttype: TestType) !void {
- // Skip wasi because it does not support std.fs.openDirAbsolute()
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- var want_name: []u8 = undefined;
- var want_name_no_input: []u8 = undefined;
- var input: []u8 = undefined;
- var want: []u8 = undefined;
- var want_ni: []u8 = undefined; // want no input: what we expect when input is empty
-
- const current_dir = try std.fs.openDirAbsolute(std.fs.path.dirname(@src().file).?, .{});
- const testdata_dir = try current_dir.openDir("testdata", .{});
-
- var want_name_type = if (ht.want.len == 0) .{} else .{ttype.to_s()};
- want_name = try fmt.allocPrint(testing.allocator, ht.want, want_name_type);
- defer testing.allocator.free(want_name);
-
- if (!mem.eql(u8, ht.input, "")) {
- const in_file = try testdata_dir.openFile(ht.input, .{});
- input = try in_file.reader().readAllAlloc(testing.allocator, math.maxInt(usize));
- defer testing.allocator.free(input);
-
- const want_file = try testdata_dir.openFile(want_name, .{});
- want = try want_file.reader().readAllAlloc(testing.allocator, math.maxInt(usize));
- defer testing.allocator.free(want);
+fn testBlock(comptime ht: HuffTest, comptime ttype: TestType) !void {
+ if (ht.input.len != 0 and ht.want.len != 0) {
+ const want_name = comptime fmt.comptimePrint(ht.want, .{ttype.to_s()});
+ const input = @embedFile("testdata/" ++ ht.input);
+ const want = @embedFile("testdata/" ++ want_name);
var buf = ArrayList(u8).init(testing.allocator);
var bw = try huffmanBitWriter(testing.allocator, buf.writer());
try writeToType(ttype, &bw, ht.tokens, input);
var got = buf.items;
- try expect(mem.eql(u8, got, want)); // expect writeBlock to yield expected result
+ try testing.expectEqualSlices(u8, want, got); // expect writeBlock to yield expected result
// Test if the writer produces the same output after reset.
buf.deinit();
@@ -1661,16 +1624,12 @@ fn testBlock(comptime ht: HuffTest, ttype: TestType) !void {
try writeToType(ttype, &bw, ht.tokens, input);
try bw.flush();
got = buf.items;
- try expect(mem.eql(u8, got, want)); // expect writeBlock to yield expected result
+ try testing.expectEqualSlices(u8, want, got); // expect writeBlock to yield expected result
try testWriterEOF(.write_block, ht.tokens, input);
}
- want_name_no_input = try fmt.allocPrint(testing.allocator, ht.want_no_input, .{ttype.to_s()});
- defer testing.allocator.free(want_name_no_input);
-
- const want_no_input_file = try testdata_dir.openFile(want_name_no_input, .{});
- want_ni = try want_no_input_file.reader().readAllAlloc(testing.allocator, math.maxInt(usize));
- defer testing.allocator.free(want_ni);
+ const want_name_no_input = comptime fmt.comptimePrint(ht.want_no_input, .{ttype.to_s()});
+ const want_ni = @embedFile("testdata/" ++ want_name_no_input);
var buf = ArrayList(u8).init(testing.allocator);
var bw = try huffmanBitWriter(testing.allocator, buf.writer());
@@ -1678,7 +1637,7 @@ fn testBlock(comptime ht: HuffTest, ttype: TestType) !void {
try writeToType(ttype, &bw, ht.tokens, null);
var got = buf.items;
- try expect(mem.eql(u8, got, want_ni)); // expect writeBlock to yield expected result
+ try testing.expectEqualSlices(u8, want_ni, got); // expect writeBlock to yield expected result
try expect(got[0] & 1 != 1); // expect no EOF
// Test if the writer produces the same output after reset.
@@ -1693,11 +1652,11 @@ fn testBlock(comptime ht: HuffTest, ttype: TestType) !void {
try bw.flush();
got = buf.items;
- try expect(mem.eql(u8, got, want_ni)); // expect writeBlock to yield expected result
+ try testing.expectEqualSlices(u8, want_ni, got); // expect writeBlock to yield expected result
try testWriterEOF(.write_block, ht.tokens, &[0]u8{});
}
-fn writeToType(ttype: TestType, bw: anytype, tok: []const token.Token, input: ?[]u8) !void {
+fn writeToType(ttype: TestType, bw: anytype, tok: []const token.Token, input: ?[]const u8) !void {
switch (ttype) {
.write_block => try bw.writeBlock(tok, false, input),
.write_dyn_block => try bw.writeBlockDynamic(tok, false, input),
@@ -1707,7 +1666,7 @@ fn writeToType(ttype: TestType, bw: anytype, tok: []const token.Token, input: ?[
}
// Tests if the written block contains an EOF marker.
-fn testWriterEOF(ttype: TestType, ht_tokens: []const token.Token, input: []u8) !void {
+fn testWriterEOF(ttype: TestType, ht_tokens: []const token.Token, input: []const u8) !void {
var buf = ArrayList(u8).init(testing.allocator);
defer buf.deinit();
var bw = try huffmanBitWriter(testing.allocator, buf.writer());
diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig
index 76546714b7..79598e59ae 100644
--- a/lib/std/compress/deflate/huffman_code.zig
+++ b/lib/std/compress/deflate/huffman_code.zig
@@ -386,39 +386,39 @@ test "generate a Huffman code from an array of frequencies" {
defer enc.deinit();
enc.generate(freqs[0..], 7);
- try testing.expect(enc.bitLength(freqs[0..]) == 141);
-
- try testing.expect(enc.codes[0].len == 3);
- try testing.expect(enc.codes[1].len == 6);
- try testing.expect(enc.codes[2].len == 6);
- try testing.expect(enc.codes[3].len == 5);
- try testing.expect(enc.codes[4].len == 3);
- try testing.expect(enc.codes[5].len == 2);
- try testing.expect(enc.codes[6].len == 2);
- try testing.expect(enc.codes[7].len == 6);
- try testing.expect(enc.codes[8].len == 0);
- try testing.expect(enc.codes[9].len == 0);
- try testing.expect(enc.codes[10].len == 0);
- try testing.expect(enc.codes[11].len == 0);
- try testing.expect(enc.codes[12].len == 0);
- try testing.expect(enc.codes[13].len == 0);
- try testing.expect(enc.codes[14].len == 0);
- try testing.expect(enc.codes[15].len == 0);
- try testing.expect(enc.codes[16].len == 6);
- try testing.expect(enc.codes[17].len == 5);
- try testing.expect(enc.codes[18].len == 3);
-
- try testing.expect(enc.codes[5].code == 0x0);
- try testing.expect(enc.codes[6].code == 0x2);
- try testing.expect(enc.codes[0].code == 0x1);
- try testing.expect(enc.codes[4].code == 0x5);
- try testing.expect(enc.codes[18].code == 0x3);
- try testing.expect(enc.codes[3].code == 0x7);
- try testing.expect(enc.codes[17].code == 0x17);
- try testing.expect(enc.codes[1].code == 0x0f);
- try testing.expect(enc.codes[2].code == 0x2f);
- try testing.expect(enc.codes[7].code == 0x1f);
- try testing.expect(enc.codes[16].code == 0x3f);
+ try testing.expectEqual(@as(u32, 141), enc.bitLength(freqs[0..]));
+
+ try testing.expectEqual(@as(usize, 3), enc.codes[0].len);
+ try testing.expectEqual(@as(usize, 6), enc.codes[1].len);
+ try testing.expectEqual(@as(usize, 6), enc.codes[2].len);
+ try testing.expectEqual(@as(usize, 5), enc.codes[3].len);
+ try testing.expectEqual(@as(usize, 3), enc.codes[4].len);
+ try testing.expectEqual(@as(usize, 2), enc.codes[5].len);
+ try testing.expectEqual(@as(usize, 2), enc.codes[6].len);
+ try testing.expectEqual(@as(usize, 6), enc.codes[7].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[8].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[9].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[10].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[11].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[12].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[13].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[14].len);
+ try testing.expectEqual(@as(usize, 0), enc.codes[15].len);
+ try testing.expectEqual(@as(usize, 6), enc.codes[16].len);
+ try testing.expectEqual(@as(usize, 5), enc.codes[17].len);
+ try testing.expectEqual(@as(usize, 3), enc.codes[18].len);
+
+ try testing.expectEqual(@as(u16, 0x0), enc.codes[5].code);
+ try testing.expectEqual(@as(u16, 0x2), enc.codes[6].code);
+ try testing.expectEqual(@as(u16, 0x1), enc.codes[0].code);
+ try testing.expectEqual(@as(u16, 0x5), enc.codes[4].code);
+ try testing.expectEqual(@as(u16, 0x3), enc.codes[18].code);
+ try testing.expectEqual(@as(u16, 0x7), enc.codes[3].code);
+ try testing.expectEqual(@as(u16, 0x17), enc.codes[17].code);
+ try testing.expectEqual(@as(u16, 0x0f), enc.codes[1].code);
+ try testing.expectEqual(@as(u16, 0x2f), enc.codes[2].code);
+ try testing.expectEqual(@as(u16, 0x1f), enc.codes[7].code);
+ try testing.expectEqual(@as(u16, 0x3f), enc.codes[16].code);
}
test "generate a Huffman code for the fixed litteral table specific to Deflate" {
diff --git a/lib/std/compress/deflate/token.zig b/lib/std/compress/deflate/token.zig
index 550a4bb331..d0e9a23647 100644
--- a/lib/std/compress/deflate/token.zig
+++ b/lib/std/compress/deflate/token.zig
@@ -99,6 +99,5 @@ pub fn offsetCode(off: u32) u32 {
test {
const std = @import("std");
- const expect = std.testing.expect;
- try expect(matchToken(555, 555) == 3_401_581_099);
+ try std.testing.expectEqual(@as(Token, 3_401_581_099), matchToken(555, 555));
}
diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig
index 735f30b176..d7a2cb0094 100644
--- a/lib/std/compress/gzip.zig
+++ b/lib/std/compress/gzip.zig
@@ -163,7 +163,7 @@ fn testReader(data: []const u8, comptime expected: []const u8) !void {
defer testing.allocator.free(buf);
// Check against the reference
- try testing.expectEqualSlices(u8, buf, expected);
+ try testing.expectEqualSlices(u8, expected, buf);
}
// All the test cases are obtained by compressing the RFC1952 text
@@ -172,8 +172,8 @@ fn testReader(data: []const u8, comptime expected: []const u8) !void {
// SHA256=164ef0897b4cbec63abf1b57f069f3599bd0fb7c72c2a4dee21bd7e03ec9af67
test "compressed data" {
try testReader(
- @embedFile("rfc1952.txt.gz"),
- @embedFile("rfc1952.txt"),
+ @embedFile("testdata/rfc1952.txt.gz"),
+ @embedFile("testdata/rfc1952.txt"),
);
}
diff --git a/lib/std/compress/rfc1951.txt b/lib/std/compress/testdata/rfc1951.txt
index 403c8c722f..403c8c722f 100644
--- a/lib/std/compress/rfc1951.txt
+++ b/lib/std/compress/testdata/rfc1951.txt
diff --git a/lib/std/compress/rfc1951.txt.fixed.z.9 b/lib/std/compress/testdata/rfc1951.txt.fixed.z.9
index 8ea5904770..8ea5904770 100644
--- a/lib/std/compress/rfc1951.txt.fixed.z.9
+++ b/lib/std/compress/testdata/rfc1951.txt.fixed.z.9
Binary files differ
diff --git a/lib/std/compress/rfc1951.txt.z.0 b/lib/std/compress/testdata/rfc1951.txt.z.0
index 3f50fb68f8..3f50fb68f8 100644
--- a/lib/std/compress/rfc1951.txt.z.0
+++ b/lib/std/compress/testdata/rfc1951.txt.z.0
Binary files differ
diff --git a/lib/std/compress/rfc1951.txt.z.9 b/lib/std/compress/testdata/rfc1951.txt.z.9
index 84e7cbe5b7..84e7cbe5b7 100644
--- a/lib/std/compress/rfc1951.txt.z.9
+++ b/lib/std/compress/testdata/rfc1951.txt.z.9
Binary files differ
diff --git a/lib/std/compress/rfc1952.txt b/lib/std/compress/testdata/rfc1952.txt
index a8e51b4567..a8e51b4567 100644
--- a/lib/std/compress/rfc1952.txt
+++ b/lib/std/compress/testdata/rfc1952.txt
diff --git a/lib/std/compress/rfc1952.txt.gz b/lib/std/compress/testdata/rfc1952.txt.gz
index be43b90a79..be43b90a79 100644
--- a/lib/std/compress/rfc1952.txt.gz
+++ b/lib/std/compress/testdata/rfc1952.txt.gz
Binary files differ
diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig
index d7d33f9fa9..0fc96a5aa9 100644
--- a/lib/std/compress/zlib.zig
+++ b/lib/std/compress/zlib.zig
@@ -99,7 +99,7 @@ fn testReader(data: []const u8, expected: []const u8) !void {
defer testing.allocator.free(buf);
// Check against the reference
- try testing.expectEqualSlices(u8, buf, expected);
+ try testing.expectEqualSlices(u8, expected, buf);
}
// All the test cases are obtained by compressing the RFC1951 text
@@ -107,21 +107,21 @@ fn testReader(data: []const u8, expected: []const u8) !void {
// https://tools.ietf.org/rfc/rfc1951.txt length=36944 bytes
// SHA256=5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009
test "compressed data" {
- const rfc1951_txt = @embedFile("rfc1951.txt");
+ const rfc1951_txt = @embedFile("testdata/rfc1951.txt");
// Compressed with compression level = 0
try testReader(
- @embedFile("rfc1951.txt.z.0"),
+ @embedFile("testdata/rfc1951.txt.z.0"),
rfc1951_txt,
);
// Compressed with compression level = 9
try testReader(
- @embedFile("rfc1951.txt.z.9"),
+ @embedFile("testdata/rfc1951.txt.z.9"),
rfc1951_txt,
);
// Compressed with compression level = 9 and fixed Huffman codes
try testReader(
- @embedFile("rfc1951.txt.fixed.z.9"),
+ @embedFile("testdata/rfc1951.txt.fixed.z.9"),
rfc1951_txt,
);
}
diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig
index 8aaf305143..20522c175d 100644
--- a/lib/std/crypto.zig
+++ b/lib/std/crypto.zig
@@ -176,6 +176,9 @@ const std = @import("std.zig");
pub const errors = @import("crypto/errors.zig");
+pub const tls = @import("crypto/tls.zig");
+pub const Certificate = @import("crypto/Certificate.zig");
+
test {
_ = aead.aegis.Aegis128L;
_ = aead.aegis.Aegis256;
@@ -264,6 +267,8 @@ test {
_ = utils;
_ = random;
_ = errors;
+ _ = tls;
+ _ = Certificate;
}
test "CSPRNG" {
diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig
new file mode 100644
index 0000000000..fe211c6146
--- /dev/null
+++ b/lib/std/crypto/Certificate.zig
@@ -0,0 +1,1115 @@
+buffer: []const u8,
+index: u32,
+
+pub const Bundle = @import("Certificate/Bundle.zig");
+
+pub const Algorithm = enum {
+ sha1WithRSAEncryption,
+ sha224WithRSAEncryption,
+ sha256WithRSAEncryption,
+ sha384WithRSAEncryption,
+ sha512WithRSAEncryption,
+ ecdsa_with_SHA224,
+ ecdsa_with_SHA256,
+ ecdsa_with_SHA384,
+ ecdsa_with_SHA512,
+
+ pub const map = std.ComptimeStringMap(Algorithm, .{
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x05 }, .sha1WithRSAEncryption },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x0B }, .sha256WithRSAEncryption },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x0C }, .sha384WithRSAEncryption },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x0D }, .sha512WithRSAEncryption },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x0E }, .sha224WithRSAEncryption },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x04, 0x03, 0x01 }, .ecdsa_with_SHA224 },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x04, 0x03, 0x02 }, .ecdsa_with_SHA256 },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x04, 0x03, 0x03 }, .ecdsa_with_SHA384 },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x04, 0x03, 0x04 }, .ecdsa_with_SHA512 },
+ });
+
+ pub fn Hash(comptime algorithm: Algorithm) type {
+ return switch (algorithm) {
+ .sha1WithRSAEncryption => crypto.hash.Sha1,
+ .ecdsa_with_SHA224, .sha224WithRSAEncryption => crypto.hash.sha2.Sha224,
+ .ecdsa_with_SHA256, .sha256WithRSAEncryption => crypto.hash.sha2.Sha256,
+ .ecdsa_with_SHA384, .sha384WithRSAEncryption => crypto.hash.sha2.Sha384,
+ .ecdsa_with_SHA512, .sha512WithRSAEncryption => crypto.hash.sha2.Sha512,
+ };
+ }
+};
+
+pub const AlgorithmCategory = enum {
+ rsaEncryption,
+ X9_62_id_ecPublicKey,
+
+ pub const map = std.ComptimeStringMap(AlgorithmCategory, .{
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x01 }, .rsaEncryption },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x02, 0x01 }, .X9_62_id_ecPublicKey },
+ });
+};
+
+pub const Attribute = enum {
+ commonName,
+ serialNumber,
+ countryName,
+ localityName,
+ stateOrProvinceName,
+ organizationName,
+ organizationalUnitName,
+ organizationIdentifier,
+ pkcs9_emailAddress,
+
+ pub const map = std.ComptimeStringMap(Attribute, .{
+ .{ &[_]u8{ 0x55, 0x04, 0x03 }, .commonName },
+ .{ &[_]u8{ 0x55, 0x04, 0x05 }, .serialNumber },
+ .{ &[_]u8{ 0x55, 0x04, 0x06 }, .countryName },
+ .{ &[_]u8{ 0x55, 0x04, 0x07 }, .localityName },
+ .{ &[_]u8{ 0x55, 0x04, 0x08 }, .stateOrProvinceName },
+ .{ &[_]u8{ 0x55, 0x04, 0x0A }, .organizationName },
+ .{ &[_]u8{ 0x55, 0x04, 0x0B }, .organizationalUnitName },
+ .{ &[_]u8{ 0x55, 0x04, 0x61 }, .organizationIdentifier },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x09, 0x01 }, .pkcs9_emailAddress },
+ });
+};
+
+pub const NamedCurve = enum {
+ secp384r1,
+ X9_62_prime256v1,
+
+ pub const map = std.ComptimeStringMap(NamedCurve, .{
+ .{ &[_]u8{ 0x2B, 0x81, 0x04, 0x00, 0x22 }, .secp384r1 },
+ .{ &[_]u8{ 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07 }, .X9_62_prime256v1 },
+ });
+};
+
+pub const ExtensionId = enum {
+ subject_key_identifier,
+ key_usage,
+ private_key_usage_period,
+ subject_alt_name,
+ issuer_alt_name,
+ basic_constraints,
+ crl_number,
+ certificate_policies,
+ authority_key_identifier,
+
+ pub const map = std.ComptimeStringMap(ExtensionId, .{
+ .{ &[_]u8{ 0x55, 0x1D, 0x0E }, .subject_key_identifier },
+ .{ &[_]u8{ 0x55, 0x1D, 0x0F }, .key_usage },
+ .{ &[_]u8{ 0x55, 0x1D, 0x10 }, .private_key_usage_period },
+ .{ &[_]u8{ 0x55, 0x1D, 0x11 }, .subject_alt_name },
+ .{ &[_]u8{ 0x55, 0x1D, 0x12 }, .issuer_alt_name },
+ .{ &[_]u8{ 0x55, 0x1D, 0x13 }, .basic_constraints },
+ .{ &[_]u8{ 0x55, 0x1D, 0x14 }, .crl_number },
+ .{ &[_]u8{ 0x55, 0x1D, 0x20 }, .certificate_policies },
+ .{ &[_]u8{ 0x55, 0x1D, 0x23 }, .authority_key_identifier },
+ });
+};
+
+pub const GeneralNameTag = enum(u5) {
+ otherName = 0,
+ rfc822Name = 1,
+ dNSName = 2,
+ x400Address = 3,
+ directoryName = 4,
+ ediPartyName = 5,
+ uniformResourceIdentifier = 6,
+ iPAddress = 7,
+ registeredID = 8,
+ _,
+};
+
+pub const Parsed = struct {
+ certificate: Certificate,
+ issuer_slice: Slice,
+ subject_slice: Slice,
+ common_name_slice: Slice,
+ signature_slice: Slice,
+ signature_algorithm: Algorithm,
+ pub_key_algo: PubKeyAlgo,
+ pub_key_slice: Slice,
+ message_slice: Slice,
+ subject_alt_name_slice: Slice,
+ validity: Validity,
+
+ pub const PubKeyAlgo = union(AlgorithmCategory) {
+ rsaEncryption: void,
+ X9_62_id_ecPublicKey: NamedCurve,
+ };
+
+ pub const Validity = struct {
+ not_before: u64,
+ not_after: u64,
+ };
+
+ pub const Slice = der.Element.Slice;
+
+ pub fn slice(p: Parsed, s: Slice) []const u8 {
+ return p.certificate.buffer[s.start..s.end];
+ }
+
+ pub fn issuer(p: Parsed) []const u8 {
+ return p.slice(p.issuer_slice);
+ }
+
+ pub fn subject(p: Parsed) []const u8 {
+ return p.slice(p.subject_slice);
+ }
+
+ pub fn commonName(p: Parsed) []const u8 {
+ return p.slice(p.common_name_slice);
+ }
+
+ pub fn signature(p: Parsed) []const u8 {
+ return p.slice(p.signature_slice);
+ }
+
+ pub fn pubKey(p: Parsed) []const u8 {
+ return p.slice(p.pub_key_slice);
+ }
+
+ pub fn pubKeySigAlgo(p: Parsed) []const u8 {
+ return p.slice(p.pub_key_signature_algorithm_slice);
+ }
+
+ pub fn message(p: Parsed) []const u8 {
+ return p.slice(p.message_slice);
+ }
+
+ pub fn subjectAltName(p: Parsed) []const u8 {
+ return p.slice(p.subject_alt_name_slice);
+ }
+
+ pub const VerifyError = error{
+ CertificateIssuerMismatch,
+ CertificateNotYetValid,
+ CertificateExpired,
+ CertificateSignatureAlgorithmUnsupported,
+ CertificateSignatureAlgorithmMismatch,
+ CertificateFieldHasInvalidLength,
+ CertificateFieldHasWrongDataType,
+ CertificatePublicKeyInvalid,
+ CertificateSignatureInvalidLength,
+ CertificateSignatureInvalid,
+ CertificateSignatureUnsupportedBitCount,
+ CertificateSignatureNamedCurveUnsupported,
+ };
+
+ /// This function verifies:
+ /// * That the subject's issuer is indeed the provided issuer.
+ /// * The time validity of the subject.
+ /// * The signature.
+ pub fn verify(parsed_subject: Parsed, parsed_issuer: Parsed, now_sec: i64) VerifyError!void {
+ // Check that the subject's issuer name matches the issuer's
+ // subject name.
+ if (!mem.eql(u8, parsed_subject.issuer(), parsed_issuer.subject())) {
+ return error.CertificateIssuerMismatch;
+ }
+
+ if (now_sec < parsed_subject.validity.not_before)
+ return error.CertificateNotYetValid;
+ if (now_sec > parsed_subject.validity.not_after)
+ return error.CertificateExpired;
+
+ switch (parsed_subject.signature_algorithm) {
+ inline .sha1WithRSAEncryption,
+ .sha224WithRSAEncryption,
+ .sha256WithRSAEncryption,
+ .sha384WithRSAEncryption,
+ .sha512WithRSAEncryption,
+ => |algorithm| return verifyRsa(
+ algorithm.Hash(),
+ parsed_subject.message(),
+ parsed_subject.signature(),
+ parsed_issuer.pub_key_algo,
+ parsed_issuer.pubKey(),
+ ),
+
+ inline .ecdsa_with_SHA224,
+ .ecdsa_with_SHA256,
+ .ecdsa_with_SHA384,
+ .ecdsa_with_SHA512,
+ => |algorithm| return verify_ecdsa(
+ algorithm.Hash(),
+ parsed_subject.message(),
+ parsed_subject.signature(),
+ parsed_issuer.pub_key_algo,
+ parsed_issuer.pubKey(),
+ ),
+ }
+ }
+
+ pub const VerifyHostNameError = error{
+ CertificateHostMismatch,
+ CertificateFieldHasInvalidLength,
+ };
+
+ pub fn verifyHostName(parsed_subject: Parsed, host_name: []const u8) VerifyHostNameError!void {
+ // If the Subject Alternative Names extension is present, this is
+ // what to check. Otherwise, only the common name is checked.
+ const subject_alt_name = parsed_subject.subjectAltName();
+ if (subject_alt_name.len == 0) {
+ if (checkHostName(host_name, parsed_subject.commonName())) {
+ return;
+ } else {
+ return error.CertificateHostMismatch;
+ }
+ }
+
+ const general_names = try der.Element.parse(subject_alt_name, 0);
+ var name_i = general_names.slice.start;
+ while (name_i < general_names.slice.end) {
+ const general_name = try der.Element.parse(subject_alt_name, name_i);
+ name_i = general_name.slice.end;
+ switch (@intToEnum(GeneralNameTag, @enumToInt(general_name.identifier.tag))) {
+ .dNSName => {
+ const dns_name = subject_alt_name[general_name.slice.start..general_name.slice.end];
+ if (checkHostName(host_name, dns_name)) return;
+ },
+ else => {},
+ }
+ }
+
+ return error.CertificateHostMismatch;
+ }
+
+ fn checkHostName(host_name: []const u8, dns_name: []const u8) bool {
+ if (mem.eql(u8, dns_name, host_name)) {
+ return true; // exact match
+ }
+
+ if (mem.startsWith(u8, dns_name, "*.")) {
+ // wildcard certificate, matches any subdomain
+ // TODO: I think wildcards are not supposed to match any prefix but
+ // only match exactly one subdomain.
+ if (mem.endsWith(u8, host_name, dns_name[1..])) {
+ // The host_name has a subdomain, but the important part matches.
+ return true;
+ }
+ if (mem.eql(u8, dns_name[2..], host_name)) {
+ // The host_name has no subdomain and matches exactly.
+ return true;
+ }
+ }
+
+ return false;
+ }
+};
+
+pub fn parse(cert: Certificate) !Parsed {
+ const cert_bytes = cert.buffer;
+ const certificate = try der.Element.parse(cert_bytes, cert.index);
+ const tbs_certificate = try der.Element.parse(cert_bytes, certificate.slice.start);
+ const version = try der.Element.parse(cert_bytes, tbs_certificate.slice.start);
+ try checkVersion(cert_bytes, version);
+ const serial_number = try der.Element.parse(cert_bytes, version.slice.end);
+ // RFC 5280, section 4.1.2.3:
+ // "This field MUST contain the same algorithm identifier as
+ // the signatureAlgorithm field in the sequence Certificate."
+ const tbs_signature = try der.Element.parse(cert_bytes, serial_number.slice.end);
+ const issuer = try der.Element.parse(cert_bytes, tbs_signature.slice.end);
+ const validity = try der.Element.parse(cert_bytes, issuer.slice.end);
+ const not_before = try der.Element.parse(cert_bytes, validity.slice.start);
+ const not_before_utc = try parseTime(cert, not_before);
+ const not_after = try der.Element.parse(cert_bytes, not_before.slice.end);
+ const not_after_utc = try parseTime(cert, not_after);
+ const subject = try der.Element.parse(cert_bytes, validity.slice.end);
+
+ const pub_key_info = try der.Element.parse(cert_bytes, subject.slice.end);
+ const pub_key_signature_algorithm = try der.Element.parse(cert_bytes, pub_key_info.slice.start);
+ const pub_key_algo_elem = try der.Element.parse(cert_bytes, pub_key_signature_algorithm.slice.start);
+ const pub_key_algo_tag = try parseAlgorithmCategory(cert_bytes, pub_key_algo_elem);
+ var pub_key_algo: Parsed.PubKeyAlgo = undefined;
+ switch (pub_key_algo_tag) {
+ .rsaEncryption => {
+ pub_key_algo = .{ .rsaEncryption = {} };
+ },
+ .X9_62_id_ecPublicKey => {
+ // RFC 5480 Section 2.1.1.1 Named Curve
+ // ECParameters ::= CHOICE {
+ // namedCurve OBJECT IDENTIFIER
+ // -- implicitCurve NULL
+ // -- specifiedCurve SpecifiedECDomain
+ // }
+ const params_elem = try der.Element.parse(cert_bytes, pub_key_algo_elem.slice.end);
+ const named_curve = try parseNamedCurve(cert_bytes, params_elem);
+ pub_key_algo = .{ .X9_62_id_ecPublicKey = named_curve };
+ },
+ }
+ const pub_key_elem = try der.Element.parse(cert_bytes, pub_key_signature_algorithm.slice.end);
+ const pub_key = try parseBitString(cert, pub_key_elem);
+
+ var common_name = der.Element.Slice.empty;
+ var name_i = subject.slice.start;
+ while (name_i < subject.slice.end) {
+ const rdn = try der.Element.parse(cert_bytes, name_i);
+ var rdn_i = rdn.slice.start;
+ while (rdn_i < rdn.slice.end) {
+ const atav = try der.Element.parse(cert_bytes, rdn_i);
+ var atav_i = atav.slice.start;
+ while (atav_i < atav.slice.end) {
+ const ty_elem = try der.Element.parse(cert_bytes, atav_i);
+ const ty = try parseAttribute(cert_bytes, ty_elem);
+ const val = try der.Element.parse(cert_bytes, ty_elem.slice.end);
+ switch (ty) {
+ .commonName => common_name = val.slice,
+ else => {},
+ }
+ atav_i = val.slice.end;
+ }
+ rdn_i = atav.slice.end;
+ }
+ name_i = rdn.slice.end;
+ }
+
+ const sig_algo = try der.Element.parse(cert_bytes, tbs_certificate.slice.end);
+ const algo_elem = try der.Element.parse(cert_bytes, sig_algo.slice.start);
+ const signature_algorithm = try parseAlgorithm(cert_bytes, algo_elem);
+ const sig_elem = try der.Element.parse(cert_bytes, sig_algo.slice.end);
+ const signature = try parseBitString(cert, sig_elem);
+
+ // Extensions
+ var subject_alt_name_slice = der.Element.Slice.empty;
+ ext: {
+ if (pub_key_info.slice.end >= tbs_certificate.slice.end)
+ break :ext;
+
+ const outer_extensions = try der.Element.parse(cert_bytes, pub_key_info.slice.end);
+ if (outer_extensions.identifier.tag != .bitstring)
+ break :ext;
+
+ const extensions = try der.Element.parse(cert_bytes, outer_extensions.slice.start);
+
+ var ext_i = extensions.slice.start;
+ while (ext_i < extensions.slice.end) {
+ const extension = try der.Element.parse(cert_bytes, ext_i);
+ ext_i = extension.slice.end;
+ const oid_elem = try der.Element.parse(cert_bytes, extension.slice.start);
+ const ext_id = parseExtensionId(cert_bytes, oid_elem) catch |err| switch (err) {
+ error.CertificateHasUnrecognizedObjectId => continue,
+ else => |e| return e,
+ };
+ const critical_elem = try der.Element.parse(cert_bytes, oid_elem.slice.end);
+ const ext_bytes_elem = if (critical_elem.identifier.tag != .boolean)
+ critical_elem
+ else
+ try der.Element.parse(cert_bytes, critical_elem.slice.end);
+ switch (ext_id) {
+ .subject_alt_name => subject_alt_name_slice = ext_bytes_elem.slice,
+ else => continue,
+ }
+ }
+ }
+
+ return .{
+ .certificate = cert,
+ .common_name_slice = common_name,
+ .issuer_slice = issuer.slice,
+ .subject_slice = subject.slice,
+ .signature_slice = signature,
+ .signature_algorithm = signature_algorithm,
+ .message_slice = .{ .start = certificate.slice.start, .end = tbs_certificate.slice.end },
+ .pub_key_algo = pub_key_algo,
+ .pub_key_slice = pub_key,
+ .validity = .{
+ .not_before = not_before_utc,
+ .not_after = not_after_utc,
+ },
+ .subject_alt_name_slice = subject_alt_name_slice,
+ };
+}
+
+/// Parse both certificates, then verify `subject` against `issuer` at time
+/// `now_sec` (seconds since the epoch).
+pub fn verify(subject: Certificate, issuer: Certificate, now_sec: i64) !void {
+    const parsed = try subject.parse();
+    return parsed.verify(try issuer.parse(), now_sec);
+}
+
+/// Return the raw bytes that `elem` refers to within this certificate's buffer.
+pub fn contents(cert: Certificate, elem: der.Element) []const u8 {
+    const s = elem.slice;
+    return cert.buffer[s.start..s.end];
+}
+
+/// Interpret `elem` as a DER BIT STRING with zero unused bits, returning a
+/// slice over the string's payload (content minus the leading
+/// unused-bit-count octet).
+/// Returns `error.CertificateFieldHasWrongDataType` if `elem` is not a bit
+/// string, and `error.CertificateHasInvalidBitString` if the content is
+/// empty or declares unused bits.
+pub fn parseBitString(cert: Certificate, elem: der.Element) !der.Element.Slice {
+    if (elem.identifier.tag != .bitstring) return error.CertificateFieldHasWrongDataType;
+    // A DER bit string must carry at least the unused-bit-count octet; with
+    // empty content the index below would read a byte outside this element
+    // (and possibly outside the buffer).
+    if (elem.slice.start >= elem.slice.end) return error.CertificateHasInvalidBitString;
+    if (cert.buffer[elem.slice.start] != 0) return error.CertificateHasInvalidBitString;
+    return .{ .start = elem.slice.start + 1, .end = elem.slice.end };
+}
+
+/// Returns number of seconds since epoch for a DER UTCTime or
+/// GeneralizedTime element.
+pub fn parseTime(cert: Certificate, elem: der.Element) !u64 {
+    const bytes = cert.contents(elem);
+    switch (elem.identifier.tag) {
+        .utc_time => {
+            // Example: "YYMMDD000000Z"
+            if (bytes.len != 13)
+                return error.CertificateTimeInvalid;
+            if (bytes[12] != 'Z')
+                return error.CertificateTimeInvalid;
+
+            // RFC 5280 section 4.1.2.5.1: a UTCTime year >= 50 is in the
+            // 1900s, a year < 50 is in the 2000s. The previous code put
+            // every two-digit year in the 2000s, misinterpreting
+            // certificates dated 1950-1999.
+            const year2 = try parseTimeDigits(bytes[0..2].*, 0, 99);
+            const century: u16 = if (year2 >= 50) 1900 else 2000;
+            return Date.toSeconds(.{
+                .year = century + year2,
+                .month = try parseTimeDigits(bytes[2..4].*, 1, 12),
+                .day = try parseTimeDigits(bytes[4..6].*, 1, 31),
+                .hour = try parseTimeDigits(bytes[6..8].*, 0, 23),
+                .minute = try parseTimeDigits(bytes[8..10].*, 0, 59),
+                .second = try parseTimeDigits(bytes[10..12].*, 0, 59),
+            });
+        },
+        .generalized_time => {
+            // Examples:
+            // "19920521000000Z"
+            // "19920622123421Z"
+            // "19920722132100.3Z"
+            // Fractional seconds (and anything after the whole seconds)
+            // are ignored.
+            if (bytes.len < 15)
+                return error.CertificateTimeInvalid;
+            return Date.toSeconds(.{
+                .year = try parseYear4(bytes[0..4]),
+                .month = try parseTimeDigits(bytes[4..6].*, 1, 12),
+                .day = try parseTimeDigits(bytes[6..8].*, 1, 31),
+                .hour = try parseTimeDigits(bytes[8..10].*, 0, 23),
+                .minute = try parseTimeDigits(bytes[10..12].*, 0, 59),
+                .second = try parseTimeDigits(bytes[12..14].*, 0, 59),
+            });
+        },
+        else => return error.CertificateFieldHasWrongDataType,
+    }
+}
+
+const Date = struct {
+    /// e.g. 1999
+    year: u16,
+    /// range: 1 to 12
+    month: u8,
+    /// range: 1 to 31
+    day: u8,
+    /// range: 0 to 23
+    hour: u8,
+    /// range: 0 to 59
+    minute: u8,
+    /// range: 0 to 59
+    second: u8,
+
+    /// Convert to number of seconds since the epoch (1970-01-01T00:00:00Z).
+    /// Assumes the date is not before the epoch; leap seconds are ignored.
+    pub fn toSeconds(date: Date) u64 {
+        var total: u64 = 0;
+
+        // Whole years between the epoch and the target year.
+        var y: u16 = 1970;
+        while (y < date.year) : (y += 1) {
+            total += @as(u64, std.time.epoch.getDaysInYear(y)) * std.time.epoch.secs_per_day;
+        }
+
+        // Whole months of the target year.
+        const leap_kind = @intToEnum(
+            std.time.epoch.YearLeapKind,
+            @boolToInt(std.time.epoch.isLeapYear(date.year)),
+        );
+        var m: u4 = 1;
+        while (m < date.month) : (m += 1) {
+            const month_days = std.time.epoch.getDaysInMonth(leap_kind, @intToEnum(std.time.epoch.Month, m));
+            total += @as(u64, month_days) * std.time.epoch.secs_per_day;
+        }
+
+        // Remaining days, hours, minutes, and seconds.
+        total += @as(u64, std.time.epoch.secs_per_day) * (date.day - 1);
+        total += @as(u64, 60 * 60) * date.hour;
+        total += @as(u64, 60) * date.minute;
+        total += date.second;
+
+        return total;
+    }
+};
+
+/// Decode a two-ASCII-digit value and check that it lies within
+/// [min, max]. Arithmetic is wrapping, so non-digit input is caught only
+/// when it falls outside the permitted range.
+pub fn parseTimeDigits(nn: @Vector(2, u8), min: u8, max: u8) !u8 {
+    const tens = nn[0] -% '0';
+    const ones = nn[1] -% '0';
+    const value = tens *% 10 +% ones;
+    if (value < min or value > max) return error.CertificateTimeInvalid;
+    return value;
+}
+
+test parseTimeDigits {
+    // In-range two-digit values decode positionally.
+    const expectEqual = std.testing.expectEqual;
+    try expectEqual(@as(u8, 0), try parseTimeDigits("00".*, 0, 99));
+    try expectEqual(@as(u8, 99), try parseTimeDigits("99".*, 0, 99));
+    try expectEqual(@as(u8, 42), try parseTimeDigits("42".*, 0, 99));
+
+    // Values outside [min, max] are rejected.
+    const expectError = std.testing.expectError;
+    try expectError(error.CertificateTimeInvalid, parseTimeDigits("13".*, 1, 12));
+    try expectError(error.CertificateTimeInvalid, parseTimeDigits("00".*, 1, 12));
+}
+
+/// Parse a four-ASCII-digit year, e.g. "1999".
+/// Returns `error.CertificateTimeInvalid` when any byte is not a decimal
+/// digit. (The previous wrapping-arithmetic decoder could map non-digit
+/// bytes onto plausible year values, e.g. "0:00" decoded to year 1000.)
+pub fn parseYear4(text: *const [4]u8) !u16 {
+    var result: u16 = 0;
+    for (text) |c| {
+        if (c < '0' or c > '9') return error.CertificateTimeInvalid;
+        // All-digit input is at most 9999, so no overflow check is needed.
+        result = result * 10 + (c - '0');
+    }
+    return result;
+}
+
+test parseYear4 {
+    // Valid four-digit years decode exactly.
+    const expectEqual = std.testing.expectEqual;
+    try expectEqual(@as(u16, 0), try parseYear4("0000"));
+    try expectEqual(@as(u16, 9999), try parseYear4("9999"));
+    try expectEqual(@as(u16, 1988), try parseYear4("1988"));
+
+    // Non-digit input is rejected.
+    const expectError = std.testing.expectError;
+    try expectError(error.CertificateTimeInvalid, parseYear4("999b"));
+    try expectError(error.CertificateTimeInvalid, parseYear4("crap"));
+}
+
+/// Parse a DER OBJECT IDENTIFIER into a signature `Algorithm`.
+pub fn parseAlgorithm(bytes: []const u8, element: der.Element) !Algorithm {
+    return parseEnum(Algorithm, bytes, element);
+}
+
+/// Parse a DER OBJECT IDENTIFIER into an `AlgorithmCategory`.
+pub fn parseAlgorithmCategory(bytes: []const u8, element: der.Element) !AlgorithmCategory {
+    return parseEnum(AlgorithmCategory, bytes, element);
+}
+
+/// Parse a DER OBJECT IDENTIFIER into a distinguished-name `Attribute`.
+pub fn parseAttribute(bytes: []const u8, element: der.Element) !Attribute {
+    return parseEnum(Attribute, bytes, element);
+}
+
+/// Parse a DER OBJECT IDENTIFIER into a `NamedCurve`.
+pub fn parseNamedCurve(bytes: []const u8, element: der.Element) !NamedCurve {
+    return parseEnum(NamedCurve, bytes, element);
+}
+
+/// Parse a DER OBJECT IDENTIFIER into an `ExtensionId`.
+pub fn parseExtensionId(bytes: []const u8, element: der.Element) !ExtensionId {
+    return parseEnum(ExtensionId, bytes, element);
+}
+
+/// Look up the OBJECT IDENTIFIER bytes of `element` in `E.map`, turning a
+/// DER-encoded OID into an enum value of `E`.
+fn parseEnum(comptime E: type, bytes: []const u8, element: der.Element) !E {
+    if (element.identifier.tag != .object_identifier)
+        return error.CertificateFieldHasWrongDataType;
+    return E.map.get(bytes[element.slice.start..element.slice.end]) orelse
+        error.CertificateHasUnrecognizedObjectId;
+}
+
+/// Accept only X.509 v3 certificates: the version field must be the
+/// [0] EXPLICIT context tag (identifier octet 0xa0) wrapping the DER
+/// encoding of INTEGER 2 ("\x02\x01\x02").
+pub fn checkVersion(bytes: []const u8, version: der.Element) !void {
+    if (@bitCast(u8, version.identifier) != 0xa0)
+        return error.UnsupportedCertificateVersion;
+    if (!mem.eql(u8, bytes[version.slice.start..version.slice.end], "\x02\x01\x02"))
+        return error.UnsupportedCertificateVersion;
+}
+
+/// Verify an RSA PKCS#1 v1.5 signature `sig` made over `message` with the
+/// DER-encoded RSA public key `pub_key`, using `Hash` as the digest.
+/// Only 1024-, 2048-, and 4096-bit moduli are accepted.
+fn verifyRsa(
+    comptime Hash: type,
+    message: []const u8,
+    sig: []const u8,
+    pub_key_algo: Parsed.PubKeyAlgo,
+    pub_key: []const u8,
+) !void {
+    if (pub_key_algo != .rsaEncryption) return error.CertificateSignatureAlgorithmMismatch;
+    const pk_components = try rsa.PublicKey.parseDer(pub_key);
+    const exponent = pk_components.exponent;
+    const modulus = pk_components.modulus;
+    if (exponent.len > modulus.len) return error.CertificatePublicKeyInvalid;
+    // A valid PKCS#1 v1.5 signature is exactly as long as the modulus.
+    if (sig.len != modulus.len) return error.CertificateSignatureInvalidLength;
+
+    // Precomputed DER "DigestInfo" prefix (hash algorithm identifier plus
+    // the OCTET STRING header) for each supported hash, as used by the
+    // EMSA-PKCS1-v1_5 encoding.
+    const hash_der = switch (Hash) {
+        crypto.hash.Sha1 => [_]u8{
+            0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e,
+            0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14,
+        },
+        crypto.hash.sha2.Sha224 => [_]u8{
+            0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
+            0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05,
+            0x00, 0x04, 0x1c,
+        },
+        crypto.hash.sha2.Sha256 => [_]u8{
+            0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
+            0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
+            0x00, 0x04, 0x20,
+        },
+        crypto.hash.sha2.Sha384 => [_]u8{
+            0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
+            0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05,
+            0x00, 0x04, 0x30,
+        },
+        crypto.hash.sha2.Sha512 => [_]u8{
+            0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
+            0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
+            0x00, 0x04, 0x40,
+        },
+        else => @compileError("unreachable"),
+    };
+
+    var msg_hashed: [Hash.digest_length]u8 = undefined;
+    Hash.hash(message, &msg_hashed, .{});
+
+    // All big-integer work happens in this fixed buffer; no heap usage.
+    var rsa_mem_buf: [512 * 64]u8 = undefined;
+    var fba = std.heap.FixedBufferAllocator.init(&rsa_mem_buf);
+    const ally = fba.allocator();
+
+    switch (modulus.len) {
+        // 1024-, 2048-, and 4096-bit moduli respectively.
+        inline 128, 256, 512 => |modulus_len| {
+            // Expected encoded message:
+            // EM = 0x00 || 0x01 || PS (0xff padding) || 0x00 || DigestInfo || H(message)
+            const ps_len = modulus_len - (hash_der.len + msg_hashed.len) - 3;
+            const em: [modulus_len]u8 =
+                [2]u8{ 0, 1 } ++
+                ([1]u8{0xff} ** ps_len) ++
+                [1]u8{0} ++
+                hash_der ++
+                msg_hashed;
+
+            const public_key = rsa.PublicKey.fromBytes(exponent, modulus, ally) catch |err| switch (err) {
+                error.OutOfMemory => unreachable, // rsa_mem_buf is big enough
+            };
+            // `encrypt` is the raw RSA public-key operation: sig^e mod n,
+            // recovering the encoded message the signer produced.
+            const em_dec = rsa.encrypt(modulus_len, sig[0..modulus_len].*, public_key, ally) catch |err| switch (err) {
+                error.OutOfMemory => unreachable, // rsa_mem_buf is big enough
+
+                error.MessageTooLong => unreachable,
+                error.NegativeIntoUnsigned => @panic("TODO make RSA not emit this error"),
+                error.TargetTooSmall => @panic("TODO make RSA not emit this error"),
+                error.BufferTooSmall => @panic("TODO make RSA not emit this error"),
+            };
+
+            if (!mem.eql(u8, &em, &em_dec)) {
+                return error.CertificateSignatureInvalid;
+            }
+        },
+        else => {
+            return error.CertificateSignatureUnsupportedBitCount;
+        },
+    }
+}
+
+/// Verify an ECDSA certificate signature (DER-encoded per RFC 5480) made
+/// over `message` with the SEC1-encoded public key `sec1_pub_key`.
+/// Returns `error.CertificateSignatureAlgorithmMismatch` when the public
+/// key is not an EC key, and `error.CertificateSignatureInvalid` when the
+/// signature does not check out.
+fn verify_ecdsa(
+    comptime Hash: type,
+    message: []const u8,
+    encoded_sig: []const u8,
+    pub_key_algo: Parsed.PubKeyAlgo,
+    sec1_pub_key: []const u8,
+) !void {
+    const sig_named_curve = switch (pub_key_algo) {
+        .X9_62_id_ecPublicKey => |named_curve| named_curve,
+        else => return error.CertificateSignatureAlgorithmMismatch,
+    };
+
+    switch (sig_named_curve) {
+        // Previously only secp384r1 was accepted; P-256 certificates are
+        // common in the wild, so both declared named curves now dispatch
+        // to the same generic verification routine.
+        .secp384r1 => return verifyEcdsaForCurve(crypto.ecc.P384, Hash, message, encoded_sig, sec1_pub_key),
+        .X9_62_prime256v1 => return verifyEcdsaForCurve(crypto.ecc.P256, Hash, message, encoded_sig, sec1_pub_key),
+    }
+}
+
+/// Shared ECDSA verification for a comptime-selected curve. All library
+/// failure modes are collapsed into `error.CertificateSignatureInvalid` so
+/// callers see a uniform error set.
+fn verifyEcdsaForCurve(
+    comptime Curve: type,
+    comptime Hash: type,
+    message: []const u8,
+    encoded_sig: []const u8,
+    sec1_pub_key: []const u8,
+) !void {
+    const Ecdsa = crypto.sign.ecdsa.Ecdsa(Curve, Hash);
+    const sig = Ecdsa.Signature.fromDer(encoded_sig) catch |err| switch (err) {
+        error.InvalidEncoding => return error.CertificateSignatureInvalid,
+    };
+    const pub_key = Ecdsa.PublicKey.fromSec1(sec1_pub_key) catch |err| switch (err) {
+        error.InvalidEncoding => return error.CertificateSignatureInvalid,
+        error.NonCanonical => return error.CertificateSignatureInvalid,
+        error.NotSquare => return error.CertificateSignatureInvalid,
+    };
+    sig.verify(message, pub_key) catch |err| switch (err) {
+        error.IdentityElement => return error.CertificateSignatureInvalid,
+        error.NonCanonical => return error.CertificateSignatureInvalid,
+        error.SignatureVerificationFailed => return error.CertificateSignatureInvalid,
+    };
+}
+
+const std = @import("../std.zig");
+const crypto = std.crypto;
+const mem = std.mem;
+const Certificate = @This();
+
+/// Minimal DER (Distinguished Encoding Rules) reader: just enough to walk
+/// the TLV structure of X.509 certificates.
+pub const der = struct {
+    pub const Class = enum(u2) {
+        universal,
+        application,
+        context_specific,
+        private,
+    };
+
+    pub const PC = enum(u1) {
+        primitive,
+        constructed,
+    };
+
+    /// The identifier octet that begins every DER element, split into its
+    /// bit fields: low 5 bits tag, then primitive/constructed, then class.
+    pub const Identifier = packed struct(u8) {
+        tag: Tag,
+        pc: PC,
+        class: Class,
+    };
+
+    /// Universal tag numbers needed by certificate parsing.
+    pub const Tag = enum(u5) {
+        boolean = 1,
+        integer = 2,
+        bitstring = 3,
+        octetstring = 4,
+        null = 5,
+        object_identifier = 6,
+        sequence = 16,
+        sequence_of = 17,
+        utc_time = 23,
+        generalized_time = 24,
+        _,
+    };
+
+    pub const Element = struct {
+        identifier: Identifier,
+        /// Content octets of the element (excluding identifier and length).
+        slice: Slice,
+
+        pub const Slice = struct {
+            start: u32,
+            end: u32,
+
+            pub const empty: Slice = .{ .start = 0, .end = 0 };
+        };
+
+        pub const ParseError = error{CertificateFieldHasInvalidLength};
+
+        /// Decode the element beginning at `bytes[index]`, returning its
+        /// identifier and the span of its content octets. Both short-form
+        /// and long-form (up to 4 octets) lengths are supported.
+        /// Truncated input and lengths extending past `bytes` now return
+        /// `error.CertificateFieldHasInvalidLength` instead of indexing
+        /// out of bounds on malformed DER.
+        pub fn parse(bytes: []const u8, index: u32) ParseError!Element {
+            var i = index;
+            // The identifier octet and the first length octet must both
+            // be present.
+            if (i >= bytes.len) return error.CertificateFieldHasInvalidLength;
+            const identifier = @bitCast(Identifier, bytes[i]);
+            i += 1;
+            if (i >= bytes.len) return error.CertificateFieldHasInvalidLength;
+            const size_byte = bytes[i];
+            i += 1;
+            if ((size_byte >> 7) == 0) {
+                // Short form: the low 7 bits are the content length.
+                const end = i + size_byte;
+                if (end > bytes.len) return error.CertificateFieldHasInvalidLength;
+                return .{
+                    .identifier = identifier,
+                    .slice = .{
+                        .start = i,
+                        .end = end,
+                    },
+                };
+            }
+
+            // Long form: the low 7 bits give how many big-endian length
+            // octets follow. More than 4 could not fit the u32 slice.
+            const len_size = @truncate(u7, size_byte);
+            if (len_size > @sizeOf(u32)) {
+                return error.CertificateFieldHasInvalidLength;
+            }
+
+            const end_i = i + len_size;
+            if (end_i > bytes.len) return error.CertificateFieldHasInvalidLength;
+            var long_form_size: u32 = 0;
+            while (i < end_i) : (i += 1) {
+                long_form_size = (long_form_size << 8) | bytes[i];
+            }
+
+            const end = std.math.add(u32, i, long_form_size) catch
+                return error.CertificateFieldHasInvalidLength;
+            if (end > bytes.len) return error.CertificateFieldHasInvalidLength;
+            return .{
+                .identifier = identifier,
+                .slice = .{
+                    .start = i,
+                    .end = end,
+                },
+            };
+        }
+    };
+};
+
+test {
+    // Pull the bundle code into the test build so its tests run.
+    _ = Bundle;
+}
+
+/// TODO: replace this with Frank's upcoming RSA implementation. the verify
+/// function won't have the possibility of failure - it will either identify a
+/// valid signature or an invalid signature.
+/// This code is borrowed from https://github.com/shiguredo/tls13-zig
+/// which is licensed under the Apache License Version 2.0, January 2004
+/// http://www.apache.org/licenses/
+/// The code has been modified.
+pub const rsa = struct {
+    const BigInt = std.math.big.int.Managed;
+
+    /// RSASSA-PSS signature verification (the EMSA-PSS-VERIFY procedure;
+    /// the numbered steps below follow RFC 8017 section 9.1.2).
+    pub const PSSSignature = struct {
+        /// Copy `msg` into a fixed-size signature buffer, zero-padding the
+        /// tail. `msg.len` must not exceed `modulus_len` (out-of-bounds
+        +// trips a safety check in `mem.copy` otherwise).
+        pub fn fromBytes(comptime modulus_len: usize, msg: []const u8) [modulus_len]u8 {
+            var result = [1]u8{0} ** modulus_len;
+            std.mem.copy(u8, &result, msg);
+            return result;
+        }
+
+        /// Verify a PSS signature: recover the encoded message with the raw
+        /// RSA public-key operation, then run EMSA-PSS-VERIFY against `msg`.
+        pub fn verify(comptime modulus_len: usize, sig: [modulus_len]u8, msg: []const u8, public_key: PublicKey, comptime Hash: type, allocator: std.mem.Allocator) !void {
+            const mod_bits = try countBits(public_key.n.toConst(), allocator);
+            const em_dec = try encrypt(modulus_len, sig, public_key, allocator);
+
+            try EMSA_PSS_VERIFY(msg, &em_dec, mod_bits - 1, Hash.digest_length, Hash, allocator);
+        }
+
+        fn EMSA_PSS_VERIFY(msg: []const u8, em: []const u8, emBit: usize, sLen: usize, comptime Hash: type, allocator: std.mem.Allocator) !void {
+            // TODO
+            // 1. If the length of M is greater than the input limitation for
+            //    the hash function (2^61 - 1 octets for SHA-1), output
+            //    "inconsistent" and stop.
+
+            // emLen = \ceil(emBits/8)
+            const emLen = ((emBit - 1) / 8) + 1;
+            std.debug.assert(emLen == em.len);
+
+            // 2.  Let mHash = Hash(M), an octet string of length hLen.
+            var mHash: [Hash.digest_length]u8 = undefined;
+            Hash.hash(msg, &mHash, .{});
+
+            // 3.  If emLen < hLen + sLen + 2, output "inconsistent" and stop.
+            if (emLen < Hash.digest_length + sLen + 2) {
+                return error.InvalidSignature;
+            }
+
+            // 4.  If the rightmost octet of EM does not have hexadecimal value
+            //     0xbc, output "inconsistent" and stop.
+            if (em[em.len - 1] != 0xbc) {
+                return error.InvalidSignature;
+            }
+
+            // 5.  Let maskedDB be the leftmost emLen - hLen - 1 octets of EM,
+            //     and let H be the next hLen octets.
+            const maskedDB = em[0..(emLen - Hash.digest_length - 1)];
+            const h = em[(emLen - Hash.digest_length - 1)..(emLen - 1)];
+
+            // 6.  If the leftmost 8emLen - emBits bits of the leftmost octet in
+            //     maskedDB are not all equal to zero, output "inconsistent" and
+            //     stop.
+            const zero_bits = emLen * 8 - emBit;
+            var mask: u8 = maskedDB[0];
+            var i: usize = 0;
+            while (i < 8 - zero_bits) : (i += 1) {
+                mask = mask >> 1;
+            }
+            if (mask != 0) {
+                return error.InvalidSignature;
+            }
+
+            // 7.  Let dbMask = MGF(H, emLen - hLen - 1).
+            const mgf_len = emLen - Hash.digest_length - 1;
+            var mgf_out = try allocator.alloc(u8, ((mgf_len - 1) / Hash.digest_length + 1) * Hash.digest_length);
+            defer allocator.free(mgf_out);
+            var dbMask = try MGF1(mgf_out, h, mgf_len, Hash, allocator);
+
+            // 8.  Let DB = maskedDB \xor dbMask.
+            i = 0;
+            while (i < dbMask.len) : (i += 1) {
+                dbMask[i] = maskedDB[i] ^ dbMask[i];
+            }
+
+            // 9.  Set the leftmost 8emLen - emBits bits of the leftmost octet
+            //     in DB to zero.
+            i = 0;
+            mask = 0;
+            while (i < 8 - zero_bits) : (i += 1) {
+                mask = mask << 1;
+                mask += 1;
+            }
+            dbMask[0] = dbMask[0] & mask;
+
+            // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not
+            //     zero or if the octet at position emLen - hLen - sLen - 1 (the
+            //     leftmost position is "position 1") does not have hexadecimal
+            //     value 0x01, output "inconsistent" and stop.
+            if (dbMask[mgf_len - sLen - 2] != 0x00) {
+                return error.InvalidSignature;
+            }
+
+            if (dbMask[mgf_len - sLen - 1] != 0x01) {
+                return error.InvalidSignature;
+            }
+
+            // 11. Let salt be the last sLen octets of DB.
+            const salt = dbMask[(mgf_len - sLen)..];
+
+            // 12. Let
+            //     M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;
+            //     M' is an octet string of length 8 + hLen + sLen with eight
+            //     initial zero octets.
+            var m_p = try allocator.alloc(u8, 8 + Hash.digest_length + sLen);
+            defer allocator.free(m_p);
+            std.mem.copy(u8, m_p, &([_]u8{0} ** 8));
+            std.mem.copy(u8, m_p[8..], &mHash);
+            std.mem.copy(u8, m_p[(8 + Hash.digest_length)..], salt);
+
+            // 13. Let H' = Hash(M'), an octet string of length hLen.
+            var h_p: [Hash.digest_length]u8 = undefined;
+            Hash.hash(m_p, &h_p, .{});
+
+            // 14. If H = H', output "consistent". Otherwise, output
+            //     "inconsistent".
+            if (!std.mem.eql(u8, h, &h_p)) {
+                return error.InvalidSignature;
+            }
+        }
+
+        /// MGF1 mask generation: repeatedly hash `seed || counter` and
+        /// concatenate the digests until `len` bytes are produced.
+        /// `out` must have room for a whole number of digests.
+        fn MGF1(out: []u8, seed: []const u8, len: usize, comptime Hash: type, allocator: std.mem.Allocator) ![]u8 {
+            var counter: usize = 0;
+            var idx: usize = 0;
+            var c: [4]u8 = undefined;
+
+            var hash = try allocator.alloc(u8, seed.len + c.len);
+            defer allocator.free(hash);
+            std.mem.copy(u8, hash, seed);
+            var hashed: [Hash.digest_length]u8 = undefined;
+
+            while (idx < len) {
+                // Big-endian 32-bit counter appended to the seed.
+                c[0] = @intCast(u8, (counter >> 24) & 0xFF);
+                c[1] = @intCast(u8, (counter >> 16) & 0xFF);
+                c[2] = @intCast(u8, (counter >> 8) & 0xFF);
+                c[3] = @intCast(u8, counter & 0xFF);
+
+                std.mem.copy(u8, hash[seed.len..], &c);
+                Hash.hash(hash, &hashed, .{});
+
+                std.mem.copy(u8, out[idx..], &hashed);
+                idx += hashed.len;
+
+                counter += 1;
+            }
+
+            return out[0..len];
+        }
+    };
+
+    /// An RSA public key: modulus `n` and public exponent `e`.
+    pub const PublicKey = struct {
+        n: BigInt,
+        e: BigInt,
+
+        pub fn deinit(self: *PublicKey) void {
+            self.n.deinit();
+            self.e.deinit();
+        }
+
+        /// Build a key from big-endian byte strings for the exponent and
+        /// the modulus.
+        pub fn fromBytes(pub_bytes: []const u8, modulus_bytes: []const u8, allocator: std.mem.Allocator) !PublicKey {
+            var _n = try BigInt.init(allocator);
+            errdefer _n.deinit();
+            try setBytes(&_n, modulus_bytes, allocator);
+
+            var _e = try BigInt.init(allocator);
+            errdefer _e.deinit();
+            try setBytes(&_e, pub_bytes, allocator);
+
+            return .{
+                .n = _n,
+                .e = _e,
+            };
+        }
+
+        /// Split a DER-encoded RSAPublicKey (SEQUENCE of two INTEGERs) into
+        /// its raw modulus and exponent byte strings, without copying.
+        pub fn parseDer(pub_key: []const u8) !struct { modulus: []const u8, exponent: []const u8 } {
+            const pub_key_seq = try der.Element.parse(pub_key, 0);
+            if (pub_key_seq.identifier.tag != .sequence) return error.CertificateFieldHasWrongDataType;
+            const modulus_elem = try der.Element.parse(pub_key, pub_key_seq.slice.start);
+            if (modulus_elem.identifier.tag != .integer) return error.CertificateFieldHasWrongDataType;
+            const exponent_elem = try der.Element.parse(pub_key, modulus_elem.slice.end);
+            if (exponent_elem.identifier.tag != .integer) return error.CertificateFieldHasWrongDataType;
+            // Skip over meaningless zeroes in the modulus.
+            const modulus_raw = pub_key[modulus_elem.slice.start..modulus_elem.slice.end];
+            const modulus_offset = for (modulus_raw) |byte, i| {
+                if (byte != 0) break i;
+            } else modulus_raw.len;
+            return .{
+                .modulus = modulus_raw[modulus_offset..],
+                .exponent = pub_key[exponent_elem.slice.start..exponent_elem.slice.end],
+            };
+        }
+    };
+
+    /// Raw RSA public-key operation: returns msg^e mod n as big-endian
+    /// bytes. Despite the name this is used here to *recover* the encoded
+    /// message from a signature.
+    fn encrypt(comptime modulus_len: usize, msg: [modulus_len]u8, public_key: PublicKey, allocator: std.mem.Allocator) ![modulus_len]u8 {
+        var m = try BigInt.init(allocator);
+        defer m.deinit();
+
+        try setBytes(&m, &msg, allocator);
+
+        if (m.order(public_key.n) != .lt) {
+            return error.MessageTooLong;
+        }
+
+        var e = try BigInt.init(allocator);
+        defer e.deinit();
+
+        try pow_montgomery(&e, &m, &public_key.e, &public_key.n, allocator);
+
+        var res: [modulus_len]u8 = undefined;
+
+        try toBytes(&res, &e, allocator);
+
+        return res;
+    }
+
+    /// Set `r` from big-endian bytes, one octet at a time.
+    fn setBytes(r: *BigInt, bytes: []const u8, allcator: std.mem.Allocator) !void {
+        try r.set(0);
+        var tmp = try BigInt.init(allcator);
+        defer tmp.deinit();
+        for (bytes) |b| {
+            try r.shiftLeft(r, 8);
+            try tmp.set(b);
+            try r.add(r, &tmp);
+        }
+    }
+
+    /// Modular exponentiation r = a^x mod n using a Montgomery-ladder-style
+    /// square-and-multiply over the bits of `x`, most significant first.
+    /// NOTE(review): despite the name, no Montgomery *reduction* is used —
+    /// each step reduces with a full division via `mod`.
+    fn pow_montgomery(r: *BigInt, a: *const BigInt, x: *const BigInt, n: *const BigInt, allocator: std.mem.Allocator) !void {
+        var bin_raw: [512]u8 = undefined;
+        try toBytes(&bin_raw, x, allocator);
+
+        // Strip leading zero bytes of the exponent.
+        var i: usize = 0;
+        while (bin_raw[i] == 0x00) : (i += 1) {}
+        const bin = bin_raw[i..];
+
+        try r.set(1);
+        var r1 = try BigInt.init(allocator);
+        defer r1.deinit();
+        try BigInt.copy(&r1, a.toConst());
+        i = 0;
+        while (i < bin.len * 8) : (i += 1) {
+            if (((bin[i / 8] >> @intCast(u3, (7 - (i % 8)))) & 0x1) == 0) {
+                try BigInt.mul(&r1, r, &r1);
+                try mod(&r1, &r1, n, allocator);
+                try BigInt.sqr(r, r);
+                try mod(r, r, n, allocator);
+            } else {
+                try BigInt.mul(r, r, &r1);
+                try mod(r, r, n, allocator);
+                try BigInt.sqr(&r1, &r1);
+                try mod(&r1, &r1, n, allocator);
+            }
+        }
+    }
+
+    /// Write `a` into `out` as big-endian bytes, zero-padded on the left.
+    /// Errors with BufferTooSmall if `a` does not fit.
+    fn toBytes(out: []u8, a: *const BigInt, allocator: std.mem.Allocator) !void {
+        const Error = error{
+            BufferTooSmall,
+        };
+
+        var mask = try BigInt.initSet(allocator, 0xFF);
+        defer mask.deinit();
+        var tmp = try BigInt.init(allocator);
+        defer tmp.deinit();
+
+        var a_copy = try BigInt.init(allocator);
+        defer a_copy.deinit();
+        try a_copy.copy(a.toConst());
+
+        // Encoding into big-endian bytes
+        var i: usize = 0;
+        while (i < out.len) : (i += 1) {
+            try tmp.bitAnd(&a_copy, &mask);
+            const b = try tmp.to(u8);
+            out[out.len - i - 1] = b;
+            try a_copy.shiftRight(&a_copy, 8);
+        }
+
+        if (!a_copy.eqZero()) {
+            return Error.BufferTooSmall;
+        }
+    }
+
+    /// rem = a mod n (the quotient is computed and discarded).
+    fn mod(rem: *BigInt, a: *const BigInt, n: *const BigInt, allocator: std.mem.Allocator) !void {
+        var q = try BigInt.init(allocator);
+        defer q.deinit();
+
+        try BigInt.divFloor(&q, rem, a, n);
+    }
+
+    /// Bit length of `a`, counted by shifting right until zero.
+    fn countBits(a: std.math.big.int.Const, allocator: std.mem.Allocator) !usize {
+        var i: usize = 0;
+        var a_copy = try BigInt.init(allocator);
+        defer a_copy.deinit();
+        try a_copy.copy(a);
+
+        while (!a_copy.eqZero()) {
+            try a_copy.shiftRight(&a_copy, 1);
+            i += 1;
+        }
+
+        return i;
+    }
+};
diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig
new file mode 100644
index 0000000000..a1684fda73
--- /dev/null
+++ b/lib/std/crypto/Certificate/Bundle.zig
@@ -0,0 +1,189 @@
+//! A set of certificates. Typically pre-installed on every operating system,
+//! these are "Certificate Authorities" used to validate SSL certificates.
+//! This data structure stores certificates in DER-encoded form, all of them
+//! concatenated together in the `bytes` array. The `map` field contains an
+//! index from the DER-encoded subject name to the index of the containing
+//! certificate within `bytes`.
+
+/// The key is the contents slice of the subject.
+map: std.HashMapUnmanaged(der.Element.Slice, u32, MapContext, std.hash_map.default_max_load_percentage) = .{},
+bytes: std.ArrayListUnmanaged(u8) = .{},
+
+/// Errors from verifying a certificate against the bundle: every
+/// `Certificate.Parsed.VerifyError` plus failure to locate the issuer.
+pub const VerifyError = Certificate.Parsed.VerifyError || error{
+    CertificateIssuerNotFound,
+};
+
+/// Look up `subject`'s issuer in the bundle and verify `subject` against
+/// it at time `now_sec`. Returns `error.CertificateIssuerNotFound` when the
+/// bundle has no matching issuer.
+pub fn verify(cb: Bundle, subject: Certificate.Parsed, now_sec: i64) VerifyError!void {
+    const issuer_index = cb.find(subject.issuer()) orelse
+        return error.CertificateIssuerNotFound;
+    const issuer_cert = Certificate{
+        .buffer = cb.bytes.items,
+        .index = issuer_index,
+    };
+    // Every certificate in the bundle was pre-parsed before being added,
+    // so parsing cannot fail here.
+    try subject.verify(issuer_cert.parse() catch unreachable, now_sec);
+}
+
+/// Find the certificate whose subject equals `subject_name` (raw DER
+/// bytes), returning its byte offset within `bytes`.
+/// The returned index becomes invalid after calling any of the rescan
+/// functions or add functions.
+pub fn find(cb: Bundle, subject_name: []const u8) ?u32 {
+    // Adapter so a plain byte slice can be looked up against the stored
+    // `der.Element.Slice` keys; hashing must match `MapContext.hash`.
+    const SliceAdapter = struct {
+        cb: Bundle,
+
+        pub fn hash(self: @This(), key: []const u8) u64 {
+            _ = self;
+            return std.hash_map.hashString(key);
+        }
+
+        pub fn eql(self: @This(), key: []const u8, stored: der.Element.Slice) bool {
+            return mem.eql(u8, key, self.cb.bytes.items[stored.start..stored.end]);
+        }
+    };
+    return cb.map.getAdapted(subject_name, SliceAdapter{ .cb = cb });
+}
+
+/// Free all memory owned by the bundle. The bundle must not be used
+/// afterwards.
+pub fn deinit(cb: *Bundle, gpa: Allocator) void {
+    cb.map.deinit(gpa);
+    cb.bytes.deinit(gpa);
+    cb.* = undefined;
+}
+
+/// Clears the set of certificates and then scans the host operating system
+/// file system standard locations for certificates.
+/// For operating systems that do not have standard CA installations to be
+/// found, this function clears the set of certificates.
+pub fn rescan(cb: *Bundle, gpa: Allocator) !void {
+    switch (builtin.os.tag) {
+        .linux => return rescanLinux(cb, gpa),
+        .windows => {
+            // TODO: load from the Windows certificate store.
+        },
+        .macos => {
+            // TODO: load from the macOS keychain.
+        },
+        else => {},
+    }
+}
+
+/// Clear the bundle and reload it from every regular file or symlink in
+/// /etc/ssl/certs. A missing directory is treated as an empty bundle.
+/// NOTE(review): some distributions keep CA certificates elsewhere (e.g.
+/// under /etc/pki) — consider scanning additional paths; confirm targets.
+pub fn rescanLinux(cb: *Bundle, gpa: Allocator) !void {
+    var dir = fs.openIterableDirAbsolute("/etc/ssl/certs", .{}) catch |err| switch (err) {
+        error.FileNotFound => return,
+        else => |e| return e,
+    };
+    defer dir.close();
+
+    cb.bytes.clearRetainingCapacity();
+    cb.map.clearRetainingCapacity();
+
+    var it = dir.iterate();
+    while (try it.next()) |entry| {
+        // Only regular files and symlinks can hold PEM data.
+        switch (entry.kind) {
+            .File, .SymLink => {},
+            else => continue,
+        }
+
+        try addCertsFromFile(cb, gpa, dir.dir, entry.name);
+    }
+
+    // Give back the slack left over from the temporary base64 buffers.
+    cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
+}
+
+/// Read a PEM file and add every non-expired, parseable, not-yet-present
+/// certificate between BEGIN/END CERTIFICATE markers to the bundle.
+/// Returns `error.MissingEndCertificateMarker` on an unterminated block.
+pub fn addCertsFromFile(
+    cb: *Bundle,
+    gpa: Allocator,
+    dir: fs.Dir,
+    sub_file_path: []const u8,
+) !void {
+    var file = try dir.openFile(sub_file_path, .{});
+    defer file.close();
+
+    const size = try file.getEndPos();
+
+    // We borrow `bytes` as a temporary buffer for the base64-encoded data.
+    // This is possible by computing the decoded length and reserving the space
+    // for the decoded bytes first.
+    const decoded_size_upper_bound = size / 4 * 3;
+    const needed_capacity = std.math.cast(u32, decoded_size_upper_bound + size) orelse
+        return error.CertificateAuthorityBundleTooBig;
+    try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity);
+    const end_reserved = @intCast(u32, cb.bytes.items.len + decoded_size_upper_bound);
+    const buffer = cb.bytes.allocatedSlice()[end_reserved..];
+    const end_index = try file.readAll(buffer);
+    const encoded_bytes = buffer[0..end_index];
+
+    const begin_marker = "-----BEGIN CERTIFICATE-----";
+    const end_marker = "-----END CERTIFICATE-----";
+
+    const now_sec = std.time.timestamp();
+
+    var start_index: usize = 0;
+    while (mem.indexOfPos(u8, encoded_bytes, start_index, begin_marker)) |begin_marker_start| {
+        const cert_start = begin_marker_start + begin_marker.len;
+        const cert_end = mem.indexOfPos(u8, encoded_bytes, cert_start, end_marker) orelse
+            return error.MissingEndCertificateMarker;
+        start_index = cert_end + end_marker.len;
+        const encoded_cert = mem.trim(u8, encoded_bytes[cert_start..cert_end], " \t\r\n");
+        const decoded_start = @intCast(u32, cb.bytes.items.len);
+        // Decoding writes before `end_reserved` while reading from after
+        // it; the upper bound above guarantees the regions never meet.
+        const dest_buf = cb.bytes.allocatedSlice()[decoded_start..];
+        cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert);
+        // Even though we could only partially parse the certificate to find
+        // the subject name, we pre-parse all of them to make sure and only
+        // include in the bundle ones that we know will parse. This way we can
+        // use `catch unreachable` later.
+        const parsed_cert = try Certificate.parse(.{
+            .buffer = cb.bytes.items,
+            .index = decoded_start,
+        });
+        if (now_sec > parsed_cert.validity.not_after) {
+            // Ignore expired cert.
+            cb.bytes.items.len = decoded_start;
+            continue;
+        }
+        const gop = try cb.map.getOrPutContext(gpa, parsed_cert.subject_slice, .{ .cb = cb });
+        if (gop.found_existing) {
+            // Duplicate subject: drop the freshly decoded copy.
+            cb.bytes.items.len = decoded_start;
+        } else {
+            gop.value_ptr.* = decoded_start;
+        }
+    }
+}
+
+const builtin = @import("builtin");
+const std = @import("../../std.zig");
+const fs = std.fs;
+const mem = std.mem;
+const crypto = std.crypto;
+const Allocator = std.mem.Allocator;
+const Certificate = std.crypto.Certificate;
+const der = Certificate.der;
+const Bundle = @This();
+
+const base64 = std.base64.standard.decoderWithIgnore(" \t\r\n");
+
+/// Hash-map context that hashes and compares stored subject-name slices by
+/// the bytes they reference inside `cb.bytes`. Must hash identically to the
+/// adapter used in `find`.
+const MapContext = struct {
+    cb: *const Bundle,
+
+    pub fn hash(ctx: MapContext, k: der.Element.Slice) u64 {
+        return std.hash_map.hashString(ctx.cb.bytes.items[k.start..k.end]);
+    }
+
+    pub fn eql(ctx: MapContext, a: der.Element.Slice, b: der.Element.Slice) bool {
+        const bytes = ctx.cb.bytes.items;
+        return mem.eql(
+            u8,
+            bytes[a.start..a.end],
+            bytes[b.start..b.end],
+        );
+    }
+};
+
+test "scan for OS-provided certificates" {
+    // WASI has no standard certificate locations to scan.
+    if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+    var bundle: Bundle = .{};
+    defer bundle.deinit(std.testing.allocator);
+
+    try bundle.rescan(std.testing.allocator);
+}
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index 01dd5d547b..da09aca351 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -174,7 +174,7 @@ pub const Aegis128L = struct {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
- mem.set(u8, m, 0xaa);
+ @memset(m.ptr, undefined, m.len);
return error.AuthenticationFailed;
}
}
@@ -343,7 +343,7 @@ pub const Aegis256 = struct {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
- mem.set(u8, m, 0xaa);
+ @memset(m.ptr, undefined, m.len);
return error.AuthenticationFailed;
}
}
diff --git a/lib/std/crypto/aes_gcm.zig b/lib/std/crypto/aes_gcm.zig
index 30fd37e6a0..6eadcdee2f 100644
--- a/lib/std/crypto/aes_gcm.zig
+++ b/lib/std/crypto/aes_gcm.zig
@@ -91,7 +91,7 @@ fn AesGcm(comptime Aes: anytype) type {
acc |= (computed_tag[p] ^ tag[p]);
}
if (acc != 0) {
- mem.set(u8, m, 0xaa);
+ @memset(m.ptr, undefined, m.len);
return error.AuthenticationFailed;
}
diff --git a/lib/std/crypto/hkdf.zig b/lib/std/crypto/hkdf.zig
index 8de3052a0b..7102ffe780 100644
--- a/lib/std/crypto/hkdf.zig
+++ b/lib/std/crypto/hkdf.zig
@@ -13,38 +13,56 @@ pub const HkdfSha512 = Hkdf(hmac.sha2.HmacSha512);
/// derives one or more uniform keys from it.
pub fn Hkdf(comptime Hmac: type) type {
return struct {
+ /// Length of a master key, in bytes.
+ pub const prk_length = Hmac.mac_length;
+
/// Return a master key from a salt and initial keying material.
- pub fn extract(salt: []const u8, ikm: []const u8) [Hmac.mac_length]u8 {
- var prk: [Hmac.mac_length]u8 = undefined;
+ pub fn extract(salt: []const u8, ikm: []const u8) [prk_length]u8 {
+ var prk: [prk_length]u8 = undefined;
Hmac.create(&prk, ikm, salt);
return prk;
}
+ /// Initialize the creation of a master key from a salt
+ /// and keying material that can be added later, possibly in chunks.
+ /// Example:
+ /// ```
+ /// var prk: [hkdf.prk_length]u8 = undefined;
+ /// var hkdf = HkdfSha256.extractInit(salt);
+ /// hkdf.update(ikm1);
+ /// hkdf.update(ikm2);
+ /// hkdf.final(&prk);
+ /// ```
+ pub fn extractInit(salt: []const u8) Hmac {
+ return Hmac.init(salt);
+ }
+
/// Derive a subkey from a master key `prk` and a subkey description `ctx`.
- pub fn expand(out: []u8, ctx: []const u8, prk: [Hmac.mac_length]u8) void {
- assert(out.len < Hmac.mac_length * 255); // output size is too large for the Hkdf construction
+ pub fn expand(out: []u8, ctx: []const u8, prk: [prk_length]u8) void {
+ assert(out.len <= prk_length * 255); // output size is too large for the Hkdf construction
var i: usize = 0;
var counter = [1]u8{1};
- while (i + Hmac.mac_length <= out.len) : (i += Hmac.mac_length) {
+ while (i + prk_length <= out.len) : (i += prk_length) {
var st = Hmac.init(&prk);
if (i != 0) {
- st.update(out[i - Hmac.mac_length ..][0..Hmac.mac_length]);
+ st.update(out[i - prk_length ..][0..prk_length]);
}
st.update(ctx);
st.update(&counter);
- st.final(out[i..][0..Hmac.mac_length]);
- counter[0] += 1;
+ st.final(out[i..][0..prk_length]);
+ counter[0] +%= 1;
+ assert(counter[0] != 1);
}
- const left = out.len % Hmac.mac_length;
+ const left = out.len % prk_length;
if (left > 0) {
var st = Hmac.init(&prk);
if (i != 0) {
- st.update(out[i - Hmac.mac_length ..][0..Hmac.mac_length]);
+ st.update(out[i - prk_length ..][0..prk_length]);
}
st.update(ctx);
st.update(&counter);
- var tmp: [Hmac.mac_length]u8 = undefined;
- st.final(tmp[0..Hmac.mac_length]);
+ var tmp: [prk_length]u8 = undefined;
+ st.final(tmp[0..prk_length]);
mem.copy(u8, out[i..][0..left], tmp[0..left]);
}
}
@@ -63,4 +81,10 @@ test "Hkdf" {
var out: [42]u8 = undefined;
kdf.expand(&out, &context, prk);
try htest.assertEqual("3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865", &out);
+
+ var hkdf = kdf.extractInit(&salt);
+ hkdf.update(&ikm);
+ var prk2: [kdf.prk_length]u8 = undefined;
+ hkdf.final(&prk2);
+ try htest.assertEqual("077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5", &prk2);
}
diff --git a/lib/std/crypto/pcurves/p256/p256_64.zig b/lib/std/crypto/pcurves/p256/p256_64.zig
index 4ea8f268f3..e8ba37e845 100644
--- a/lib/std/crypto/pcurves/p256/p256_64.zig
+++ b/lib/std/crypto/pcurves/p256/p256_64.zig
@@ -75,10 +75,10 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @addWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @addWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @addWithOverflow(arg2, arg3);
+ const ov2 = @addWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function subborrowxU64 is a subtraction with borrow.
@@ -97,10 +97,10 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @subWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @subWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @subWithOverflow(arg2, arg3);
+ const ov2 = @subWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function mulxU64 is a multiplication, returning the full double-width result.
diff --git a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig
index 9c99d18ccf..ea102360cf 100644
--- a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig
+++ b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig
@@ -75,10 +75,10 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @addWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @addWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @addWithOverflow(arg2, arg3);
+ const ov2 = @addWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function subborrowxU64 is a subtraction with borrow.
@@ -97,10 +97,10 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @subWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @subWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @subWithOverflow(arg2, arg3);
+ const ov2 = @subWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function mulxU64 is a multiplication, returning the full double-width result.
diff --git a/lib/std/crypto/pcurves/p384/p384_64.zig b/lib/std/crypto/pcurves/p384/p384_64.zig
index bd39fc527a..45c12835b3 100644
--- a/lib/std/crypto/pcurves/p384/p384_64.zig
+++ b/lib/std/crypto/pcurves/p384/p384_64.zig
@@ -44,10 +44,10 @@ pub const NonMontgomeryDomainFieldElement = [6]u64;
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @addWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @addWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @addWithOverflow(arg2, arg3);
+ const ov2 = @addWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function subborrowxU64 is a subtraction with borrow.
@@ -66,10 +66,10 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @subWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @subWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @subWithOverflow(arg2, arg3);
+ const ov2 = @subWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function mulxU64 is a multiplication, returning the full double-width result.
diff --git a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig
index e45e43f98c..0ce7727148 100644
--- a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig
+++ b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig
@@ -44,10 +44,10 @@ pub const NonMontgomeryDomainFieldElement = [6]u64;
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @addWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @addWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @addWithOverflow(arg2, arg3);
+ const ov2 = @addWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function subborrowxU64 is a subtraction with borrow.
@@ -66,10 +66,10 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @subWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @subWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @subWithOverflow(arg2, arg3);
+ const ov2 = @subWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function mulxU64 is a multiplication, returning the full double-width result.
diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig
index 2309b48ac9..5643ea88d5 100644
--- a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig
+++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig
@@ -44,10 +44,10 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @addWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @addWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @addWithOverflow(arg2, arg3);
+ const ov2 = @addWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function subborrowxU64 is a subtraction with borrow.
@@ -66,10 +66,10 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @subWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @subWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @subWithOverflow(arg2, arg3);
+ const ov2 = @subWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function mulxU64 is a multiplication, returning the full double-width result.
diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig
index 8e9687f0a1..aca1bd3063 100644
--- a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig
+++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig
@@ -44,10 +44,10 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @addWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @addWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @addWithOverflow(arg2, arg3);
+ const ov2 = @addWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function subborrowxU64 is a subtraction with borrow.
@@ -66,10 +66,10 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
- var t: u64 = undefined;
- const carry1 = @subWithOverflow(u64, arg2, arg3, &t);
- const carry2 = @subWithOverflow(u64, t, arg1, out1);
- out2.* = @boolToInt(carry1) | @boolToInt(carry2);
+ const ov1 = @subWithOverflow(arg2, arg3);
+ const ov2 = @subWithOverflow(ov1[0], arg1);
+ out1.* = ov2[0];
+ out2.* = ov1[1] | ov2[1];
}
/// The function mulxU64 is a multiplication, returning the full double-width result.
diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig
index bba03a3fff..4b6965d040 100644
--- a/lib/std/crypto/phc_encoding.zig
+++ b/lib/std/crypto/phc_encoding.zig
@@ -110,9 +110,9 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
var found = false;
inline for (comptime meta.fields(HashResult)) |p| {
if (mem.eql(u8, p.name, param.key)) {
- switch (@typeInfo(p.field_type)) {
+ switch (@typeInfo(p.type)) {
.Int => @field(out, p.name) = fmt.parseUnsigned(
- p.field_type,
+ p.type,
param.value,
10,
) catch return Error.InvalidEncoding,
@@ -161,7 +161,7 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
// with default values
var expected_fields: usize = 0;
inline for (comptime meta.fields(HashResult)) |p| {
- if (@typeInfo(p.field_type) != .Optional and p.default_value == null) {
+ if (@typeInfo(p.type) != .Optional and p.default_value == null) {
expected_fields += 1;
}
}
@@ -223,7 +223,7 @@ fn serializeTo(params: anytype, out: anytype) !void {
{
const value = @field(params, p.name);
try out.writeAll(if (has_params) params_delimiter else fields_delimiter);
- if (@typeInfo(p.field_type) == .Struct) {
+ if (@typeInfo(p.type) == .Struct) {
var buf: [@TypeOf(value).max_encoded_length]u8 = undefined;
try out.print("{s}{s}{s}", .{ p.name, kv_delimiter, try value.toB64(&buf) });
} else {
diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig
index c4cd86b0e4..2027403ee2 100644
--- a/lib/std/crypto/salsa20.zig
+++ b/lib/std/crypto/salsa20.zig
@@ -263,7 +263,9 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
while (j < 64) : (j += 1) {
xout[j] ^= buf[j];
}
- ctx[9] += @boolToInt(@addWithOverflow(u32, ctx[8], 1, &ctx[8]));
+ const ov = @addWithOverflow(ctx[8], 1);
+ ctx[8] = ov[0];
+ ctx[9] += ov[1];
}
if (i < in.len) {
salsaCore(x[0..], ctx, true);
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index 9cdf8edcf1..217dea3723 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -142,6 +142,11 @@ fn Sha2x32(comptime params: Sha2Params32) type {
d.total_len += b.len;
}
+ pub fn peek(d: Self) [digest_length]u8 {
+ var copy = d;
+ return copy.finalResult();
+ }
+
pub fn final(d: *Self, out: *[digest_length]u8) void {
// The buffer here will never be completely full.
mem.set(u8, d.buf[d.buf_len..], 0);
@@ -175,6 +180,12 @@ fn Sha2x32(comptime params: Sha2Params32) type {
}
}
+ pub fn finalResult(d: *Self) [digest_length]u8 {
+ var result: [digest_length]u8 = undefined;
+ d.final(&result);
+ return result;
+ }
+
const W = [64]u32{
0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
@@ -621,6 +632,11 @@ fn Sha2x64(comptime params: Sha2Params64) type {
d.total_len += b.len;
}
+ pub fn peek(d: Self) [digest_length]u8 {
+ var copy = d;
+ return copy.finalResult();
+ }
+
pub fn final(d: *Self, out: *[digest_length]u8) void {
// The buffer here will never be completely full.
mem.set(u8, d.buf[d.buf_len..], 0);
@@ -654,6 +670,12 @@ fn Sha2x64(comptime params: Sha2Params64) type {
}
}
+ pub fn finalResult(d: *Self) [digest_length]u8 {
+ var result: [digest_length]u8 = undefined;
+ d.final(&result);
+ return result;
+ }
+
fn round(d: *Self, b: *const [128]u8) void {
var s: [80]u64 = undefined;
diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig
index 567b39bfc3..7735d7bc71 100644
--- a/lib/std/crypto/sha3.zig
+++ b/lib/std/crypto/sha3.zig
@@ -14,17 +14,19 @@ pub const Keccak_512 = Keccak(512, 0x01);
fn Keccak(comptime bits: usize, comptime delim: u8) type {
return struct {
const Self = @This();
- pub const block_length = 200;
+ /// The output length, in bytes.
pub const digest_length = bits / 8;
+ /// The block length, or rate, in bytes.
+ pub const block_length = 200 - bits / 4;
+ /// Keccak does not have any options.
pub const Options = struct {};
s: [200]u8,
offset: usize,
- rate: usize,
pub fn init(options: Options) Self {
_ = options;
- return Self{ .s = [_]u8{0} ** 200, .offset = 0, .rate = 200 - (bits / 4) };
+ return Self{ .s = [_]u8{0} ** 200, .offset = 0 };
}
pub fn hash(b: []const u8, out: *[digest_length]u8, options: Options) void {
@@ -36,7 +38,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
pub fn update(d: *Self, b: []const u8) void {
var ip: usize = 0;
var len = b.len;
- var rate = d.rate - d.offset;
+ var rate = block_length - d.offset;
var offset = d.offset;
// absorb
@@ -48,7 +50,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
ip += rate;
len -= rate;
- rate = d.rate;
+ rate = block_length;
offset = 0;
}
@@ -61,7 +63,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
pub fn final(d: *Self, out: *[digest_length]u8) void {
// padding
d.s[d.offset] ^= delim;
- d.s[d.rate - 1] ^= 0x80;
+ d.s[block_length - 1] ^= 0x80;
keccakF(1600, &d.s);
@@ -69,11 +71,11 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
var op: usize = 0;
var len: usize = bits / 8;
- while (len >= d.rate) {
- mem.copy(u8, out[op..], d.s[0..d.rate]);
+ while (len >= block_length) {
+ mem.copy(u8, out[op..], d.s[0..block_length]);
keccakF(1600, &d.s);
- op += d.rate;
- len -= d.rate;
+ op += block_length;
+ len -= block_length;
}
mem.copy(u8, out[op..], d.s[0..len]);
diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig
new file mode 100644
index 0000000000..7d89da8929
--- /dev/null
+++ b/lib/std/crypto/tls.zig
@@ -0,0 +1,494 @@
+//! Plaintext:
+//! * type: ContentType
+//! * legacy_record_version: u16 = 0x0303,
+//! * length: u16,
+//! - The length (in bytes) of the following TLSPlaintext.fragment. The
+//! length MUST NOT exceed 2^14 bytes.
+//! * fragment: opaque
+//! - the data being transmitted
+//!
+//! Ciphertext
+//! * ContentType opaque_type = application_data; /* 23 */
+//! * ProtocolVersion legacy_record_version = 0x0303; /* TLS v1.2 */
+//! * uint16 length;
+//! * opaque encrypted_record[TLSCiphertext.length];
+//!
+//! Handshake:
+//! * type: HandshakeType
+//! * length: u24
+//! * data: opaque
+//!
+//! ServerHello:
+//! * ProtocolVersion legacy_version = 0x0303;
+//! * Random random;
+//! * opaque legacy_session_id_echo<0..32>;
+//! * CipherSuite cipher_suite;
+//! * uint8 legacy_compression_method = 0;
+//! * Extension extensions<6..2^16-1>;
+//!
+//! Extension:
+//! * ExtensionType extension_type;
+//! * opaque extension_data<0..2^16-1>;
+
+const std = @import("../std.zig");
+const Tls = @This();
+const net = std.net;
+const mem = std.mem;
+const crypto = std.crypto;
+const assert = std.debug.assert;
+
+pub const Client = @import("tls/Client.zig");
+
+pub const record_header_len = 5;
+pub const max_ciphertext_len = (1 << 14) + 256;
+pub const max_ciphertext_record_len = max_ciphertext_len + record_header_len;
+pub const hello_retry_request_sequence = [32]u8{
+ 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91,
+ 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB, 0x8C, 0x5E, 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C,
+};
+
+pub const close_notify_alert = [_]u8{
+ @enumToInt(AlertLevel.warning),
+ @enumToInt(AlertDescription.close_notify),
+};
+
+pub const ProtocolVersion = enum(u16) {
+ tls_1_2 = 0x0303,
+ tls_1_3 = 0x0304,
+ _,
+};
+
+pub const ContentType = enum(u8) {
+ invalid = 0,
+ change_cipher_spec = 20,
+ alert = 21,
+ handshake = 22,
+ application_data = 23,
+ _,
+};
+
+pub const HandshakeType = enum(u8) {
+ client_hello = 1,
+ server_hello = 2,
+ new_session_ticket = 4,
+ end_of_early_data = 5,
+ encrypted_extensions = 8,
+ certificate = 11,
+ certificate_request = 13,
+ certificate_verify = 15,
+ finished = 20,
+ key_update = 24,
+ message_hash = 254,
+ _,
+};
+
+pub const ExtensionType = enum(u16) {
+ /// RFC 6066
+ server_name = 0,
+ /// RFC 6066
+ max_fragment_length = 1,
+ /// RFC 6066
+ status_request = 5,
+ /// RFC 8422, 7919
+ supported_groups = 10,
+ /// RFC 8446
+ signature_algorithms = 13,
+ /// RFC 5764
+ use_srtp = 14,
+ /// RFC 6520
+ heartbeat = 15,
+ /// RFC 7301
+ application_layer_protocol_negotiation = 16,
+ /// RFC 6962
+ signed_certificate_timestamp = 18,
+ /// RFC 7250
+ client_certificate_type = 19,
+ /// RFC 7250
+ server_certificate_type = 20,
+ /// RFC 7685
+ padding = 21,
+ /// RFC 8446
+ pre_shared_key = 41,
+ /// RFC 8446
+ early_data = 42,
+ /// RFC 8446
+ supported_versions = 43,
+ /// RFC 8446
+ cookie = 44,
+ /// RFC 8446
+ psk_key_exchange_modes = 45,
+ /// RFC 8446
+ certificate_authorities = 47,
+ /// RFC 8446
+ oid_filters = 48,
+ /// RFC 8446
+ post_handshake_auth = 49,
+ /// RFC 8446
+ signature_algorithms_cert = 50,
+ /// RFC 8446
+ key_share = 51,
+
+ _,
+};
+
+pub const AlertLevel = enum(u8) {
+ warning = 1,
+ fatal = 2,
+ _,
+};
+
+pub const AlertDescription = enum(u8) {
+ close_notify = 0,
+ unexpected_message = 10,
+ bad_record_mac = 20,
+ record_overflow = 22,
+ handshake_failure = 40,
+ bad_certificate = 42,
+ unsupported_certificate = 43,
+ certificate_revoked = 44,
+ certificate_expired = 45,
+ certificate_unknown = 46,
+ illegal_parameter = 47,
+ unknown_ca = 48,
+ access_denied = 49,
+ decode_error = 50,
+ decrypt_error = 51,
+ protocol_version = 70,
+ insufficient_security = 71,
+ internal_error = 80,
+ inappropriate_fallback = 86,
+ user_canceled = 90,
+ missing_extension = 109,
+ unsupported_extension = 110,
+ unrecognized_name = 112,
+ bad_certificate_status_response = 113,
+ unknown_psk_identity = 115,
+ certificate_required = 116,
+ no_application_protocol = 120,
+ _,
+};
+
+pub const SignatureScheme = enum(u16) {
+ // RSASSA-PKCS1-v1_5 algorithms
+ rsa_pkcs1_sha256 = 0x0401,
+ rsa_pkcs1_sha384 = 0x0501,
+ rsa_pkcs1_sha512 = 0x0601,
+
+ // ECDSA algorithms
+ ecdsa_secp256r1_sha256 = 0x0403,
+ ecdsa_secp384r1_sha384 = 0x0503,
+ ecdsa_secp521r1_sha512 = 0x0603,
+
+ // RSASSA-PSS algorithms with public key OID rsaEncryption
+ rsa_pss_rsae_sha256 = 0x0804,
+ rsa_pss_rsae_sha384 = 0x0805,
+ rsa_pss_rsae_sha512 = 0x0806,
+
+ // EdDSA algorithms
+ ed25519 = 0x0807,
+ ed448 = 0x0808,
+
+ // RSASSA-PSS algorithms with public key OID RSASSA-PSS
+ rsa_pss_pss_sha256 = 0x0809,
+ rsa_pss_pss_sha384 = 0x080a,
+ rsa_pss_pss_sha512 = 0x080b,
+
+ // Legacy algorithms
+ rsa_pkcs1_sha1 = 0x0201,
+ ecdsa_sha1 = 0x0203,
+
+ _,
+};
+
+pub const NamedGroup = enum(u16) {
+ // Elliptic Curve Groups (ECDHE)
+ secp256r1 = 0x0017,
+ secp384r1 = 0x0018,
+ secp521r1 = 0x0019,
+ x25519 = 0x001D,
+ x448 = 0x001E,
+
+ // Finite Field Groups (DHE)
+ ffdhe2048 = 0x0100,
+ ffdhe3072 = 0x0101,
+ ffdhe4096 = 0x0102,
+ ffdhe6144 = 0x0103,
+ ffdhe8192 = 0x0104,
+
+ _,
+};
+
+pub const CipherSuite = enum(u16) {
+ AES_128_GCM_SHA256 = 0x1301,
+ AES_256_GCM_SHA384 = 0x1302,
+ CHACHA20_POLY1305_SHA256 = 0x1303,
+ AES_128_CCM_SHA256 = 0x1304,
+ AES_128_CCM_8_SHA256 = 0x1305,
+ AEGIS_256_SHA384 = 0x1306,
+ AEGIS_128L_SHA256 = 0x1307,
+ _,
+};
+
+pub const CertificateType = enum(u8) {
+ X509 = 0,
+ RawPublicKey = 2,
+ _,
+};
+
+pub const KeyUpdateRequest = enum(u8) {
+ update_not_requested = 0,
+ update_requested = 1,
+ _,
+};
+
+pub fn HandshakeCipherT(comptime AeadType: type, comptime HashType: type) type {
+ return struct {
+ pub const AEAD = AeadType;
+ pub const Hash = HashType;
+ pub const Hmac = crypto.auth.hmac.Hmac(Hash);
+ pub const Hkdf = crypto.kdf.hkdf.Hkdf(Hmac);
+
+ handshake_secret: [Hkdf.prk_length]u8,
+ master_secret: [Hkdf.prk_length]u8,
+ client_handshake_key: [AEAD.key_length]u8,
+ server_handshake_key: [AEAD.key_length]u8,
+ client_finished_key: [Hmac.key_length]u8,
+ server_finished_key: [Hmac.key_length]u8,
+ client_handshake_iv: [AEAD.nonce_length]u8,
+ server_handshake_iv: [AEAD.nonce_length]u8,
+ transcript_hash: Hash,
+ };
+}
+
+pub const HandshakeCipher = union(enum) {
+ AES_128_GCM_SHA256: HandshakeCipherT(crypto.aead.aes_gcm.Aes128Gcm, crypto.hash.sha2.Sha256),
+ AES_256_GCM_SHA384: HandshakeCipherT(crypto.aead.aes_gcm.Aes256Gcm, crypto.hash.sha2.Sha384),
+ CHACHA20_POLY1305_SHA256: HandshakeCipherT(crypto.aead.chacha_poly.ChaCha20Poly1305, crypto.hash.sha2.Sha256),
+ AEGIS_256_SHA384: HandshakeCipherT(crypto.aead.aegis.Aegis256, crypto.hash.sha2.Sha384),
+ AEGIS_128L_SHA256: HandshakeCipherT(crypto.aead.aegis.Aegis128L, crypto.hash.sha2.Sha256),
+};
+
+pub fn ApplicationCipherT(comptime AeadType: type, comptime HashType: type) type {
+ return struct {
+ pub const AEAD = AeadType;
+ pub const Hash = HashType;
+ pub const Hmac = crypto.auth.hmac.Hmac(Hash);
+ pub const Hkdf = crypto.kdf.hkdf.Hkdf(Hmac);
+
+ client_secret: [Hash.digest_length]u8,
+ server_secret: [Hash.digest_length]u8,
+ client_key: [AEAD.key_length]u8,
+ server_key: [AEAD.key_length]u8,
+ client_iv: [AEAD.nonce_length]u8,
+ server_iv: [AEAD.nonce_length]u8,
+ };
+}
+
+/// Encryption parameters for application traffic.
+pub const ApplicationCipher = union(enum) {
+ AES_128_GCM_SHA256: ApplicationCipherT(crypto.aead.aes_gcm.Aes128Gcm, crypto.hash.sha2.Sha256),
+ AES_256_GCM_SHA384: ApplicationCipherT(crypto.aead.aes_gcm.Aes256Gcm, crypto.hash.sha2.Sha384),
+ CHACHA20_POLY1305_SHA256: ApplicationCipherT(crypto.aead.chacha_poly.ChaCha20Poly1305, crypto.hash.sha2.Sha256),
+ AEGIS_256_SHA384: ApplicationCipherT(crypto.aead.aegis.Aegis256, crypto.hash.sha2.Sha384),
+ AEGIS_128L_SHA256: ApplicationCipherT(crypto.aead.aegis.Aegis128L, crypto.hash.sha2.Sha256),
+};
+
+pub fn hkdfExpandLabel(
+ comptime Hkdf: type,
+ key: [Hkdf.prk_length]u8,
+ label: []const u8,
+ context: []const u8,
+ comptime len: usize,
+) [len]u8 {
+ const max_label_len = 255;
+ const max_context_len = 255;
+ const tls13 = "tls13 ";
+ var buf: [2 + 1 + tls13.len + max_label_len + 1 + max_context_len]u8 = undefined;
+ mem.writeIntBig(u16, buf[0..2], len);
+ buf[2] = @intCast(u8, tls13.len + label.len);
+ buf[3..][0..tls13.len].* = tls13.*;
+ var i: usize = 3 + tls13.len;
+ mem.copy(u8, buf[i..], label);
+ i += label.len;
+ buf[i] = @intCast(u8, context.len);
+ i += 1;
+ mem.copy(u8, buf[i..], context);
+ i += context.len;
+
+ var result: [len]u8 = undefined;
+ Hkdf.expand(&result, buf[0..i], key);
+ return result;
+}
+
+pub fn emptyHash(comptime Hash: type) [Hash.digest_length]u8 {
+ var result: [Hash.digest_length]u8 = undefined;
+ Hash.hash(&.{}, &result, .{});
+ return result;
+}
+
+pub fn hmac(comptime Hmac: type, message: []const u8, key: [Hmac.key_length]u8) [Hmac.mac_length]u8 {
+ var result: [Hmac.mac_length]u8 = undefined;
+ Hmac.create(&result, message, &key);
+ return result;
+}
+
+pub inline fn extension(comptime et: ExtensionType, bytes: anytype) [2 + 2 + bytes.len]u8 {
+ return int2(@enumToInt(et)) ++ array(1, bytes);
+}
+
+pub inline fn array(comptime elem_size: comptime_int, bytes: anytype) [2 + bytes.len]u8 {
+ comptime assert(bytes.len % elem_size == 0);
+ return int2(bytes.len) ++ bytes;
+}
+
+pub inline fn enum_array(comptime E: type, comptime tags: []const E) [2 + @sizeOf(E) * tags.len]u8 {
+ assert(@sizeOf(E) == 2);
+ var result: [tags.len * 2]u8 = undefined;
+ for (tags) |elem, i| {
+ result[i * 2] = @truncate(u8, @enumToInt(elem) >> 8);
+ result[i * 2 + 1] = @truncate(u8, @enumToInt(elem));
+ }
+ return array(2, result);
+}
+
+pub inline fn int2(x: u16) [2]u8 {
+ return .{
+ @truncate(u8, x >> 8),
+ @truncate(u8, x),
+ };
+}
+
+pub inline fn int3(x: u24) [3]u8 {
+ return .{
+ @truncate(u8, x >> 16),
+ @truncate(u8, x >> 8),
+ @truncate(u8, x),
+ };
+}
+
+/// An abstraction to ensure that protocol-parsing code does not perform an
+/// out-of-bounds read.
+pub const Decoder = struct {
+ buf: []u8,
+ /// Points to the next byte in buffer that will be decoded.
+ idx: usize = 0,
+ /// We have already verified that `cap` is at least this index, so reads
+ our_end: usize = 0,
+ /// Beyond this point in `buf` are extra tag-along bytes beyond the amount we
+ /// requested with `readAtLeast`.
+ their_end: usize = 0,
+ /// Points to the end of the filled region of the buffer. Beyond this point
+ /// in `buf` are undefined bytes.
+ cap: usize = 0,
+ /// Debug helper to prevent illegal calls to read functions.
+ disable_reads: bool = false,
+
+ pub fn fromTheirSlice(buf: []u8) Decoder {
+ return .{
+ .buf = buf,
+ .their_end = buf.len,
+ .cap = buf.len,
+ .disable_reads = true,
+ };
+ }
+
+ /// Use this function to increase `their_end`.
+ pub fn readAtLeast(d: *Decoder, stream: anytype, their_amt: usize) !void {
+ assert(!d.disable_reads);
+ const existing_amt = d.cap - d.idx;
+ d.their_end = d.idx + their_amt;
+ if (their_amt <= existing_amt) return;
+ const request_amt = their_amt - existing_amt;
+ const dest = d.buf[d.cap..];
+ if (request_amt > dest.len) return error.TlsRecordOverflow;
+ const actual_amt = try stream.readAtLeast(dest, request_amt);
+ if (actual_amt < request_amt) return error.TlsConnectionTruncated;
+ d.cap += actual_amt;
+ }
+
+ /// Same as `readAtLeast` but also increases `our_end` by exactly `our_amt`.
+ /// Use when `our_amt` is calculated by us, not by them.
+ pub fn readAtLeastOurAmt(d: *Decoder, stream: anytype, our_amt: usize) !void {
+ assert(!d.disable_reads);
+ try readAtLeast(d, stream, our_amt);
+ d.our_end = d.idx + our_amt;
+ }
+
+ /// Use this function to increase `our_end`.
+ /// This should always be called with an amount provided by us, not them.
+ pub fn ensure(d: *Decoder, amt: usize) !void {
+ d.our_end = @max(d.idx + amt, d.our_end);
+ if (d.our_end > d.their_end) return error.TlsDecodeError;
+ }
+
+ /// Use this function to increase `idx`.
+ pub fn decode(d: *Decoder, comptime T: type) T {
+ switch (@typeInfo(T)) {
+ .Int => |info| switch (info.bits) {
+ 8 => {
+ skip(d, 1);
+ return d.buf[d.idx - 1];
+ },
+ 16 => {
+ skip(d, 2);
+ const b0: u16 = d.buf[d.idx - 2];
+ const b1: u16 = d.buf[d.idx - 1];
+ return (b0 << 8) | b1;
+ },
+ 24 => {
+ skip(d, 3);
+ const b0: u24 = d.buf[d.idx - 3];
+ const b1: u24 = d.buf[d.idx - 2];
+ const b2: u24 = d.buf[d.idx - 1];
+ return (b0 << 16) | (b1 << 8) | b2;
+ },
+ else => @compileError("unsupported int type: " ++ @typeName(T)),
+ },
+ .Enum => |info| {
+ const int = d.decode(info.tag_type);
+ if (info.is_exhaustive) @compileError("exhaustive enum cannot be used");
+ return @intToEnum(T, int);
+ },
+ else => @compileError("unsupported type: " ++ @typeName(T)),
+ }
+ }
+
+ /// Use this function to increase `idx`.
+ pub fn array(d: *Decoder, comptime len: usize) *[len]u8 {
+ skip(d, len);
+ return d.buf[d.idx - len ..][0..len];
+ }
+
+ /// Use this function to increase `idx`.
+ pub fn slice(d: *Decoder, len: usize) []u8 {
+ skip(d, len);
+ return d.buf[d.idx - len ..][0..len];
+ }
+
+ /// Use this function to increase `idx`.
+ pub fn skip(d: *Decoder, amt: usize) void {
+ d.idx += amt;
+ assert(d.idx <= d.our_end); // insufficient ensured bytes
+ }
+
+ pub fn eof(d: Decoder) bool {
+ assert(d.our_end <= d.their_end);
+ assert(d.idx <= d.our_end);
+ return d.idx == d.their_end;
+ }
+
+ /// Provide the length they claim, and receive a sub-decoder specific to that slice.
+ /// The parent decoder is advanced to the end.
+ pub fn sub(d: *Decoder, their_len: usize) !Decoder {
+ const end = d.idx + their_len;
+ if (end > d.their_end) return error.TlsDecodeError;
+ const sub_buf = d.buf[d.idx..end];
+ d.idx = end;
+ d.our_end = end;
+ return fromTheirSlice(sub_buf);
+ }
+
+ pub fn rest(d: Decoder) []u8 {
+ return d.buf[d.idx..d.cap];
+ }
+};
diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig
new file mode 100644
index 0000000000..44891a1973
--- /dev/null
+++ b/lib/std/crypto/tls/Client.zig
@@ -0,0 +1,1308 @@
+const std = @import("../../std.zig");
+const tls = std.crypto.tls;
+const Client = @This();
+const net = std.net;
+const mem = std.mem;
+const crypto = std.crypto;
+const assert = std.debug.assert;
+const Certificate = std.crypto.Certificate;
+
+const max_ciphertext_len = tls.max_ciphertext_len;
+const hkdfExpandLabel = tls.hkdfExpandLabel;
+const int2 = tls.int2;
+const int3 = tls.int3;
+const array = tls.array;
+const enum_array = tls.enum_array;
+
+/// Sequence number for incoming (server-to-client) records; folded into the
+/// per-record AEAD nonce when decrypting.
+read_seq: u64,
+/// Sequence number for outgoing (client-to-server) records; folded into the
+/// per-record AEAD nonce when encrypting.
+write_seq: u64,
+/// The starting index of cleartext bytes inside `partially_read_buffer`.
+partial_cleartext_idx: u15,
+/// The ending index of cleartext bytes inside `partially_read_buffer` as well
+/// as the starting index of ciphertext bytes.
+partial_ciphertext_idx: u15,
+/// The ending index of ciphertext bytes inside `partially_read_buffer`.
+partial_ciphertext_end: u15,
+/// When this is true, the stream may still not be at the end because there
+/// may be data in `partially_read_buffer`.
+received_close_notify: bool,
+/// By default, reaching the end-of-stream when reading from the server will
+/// cause `error.TlsConnectionTruncated` to be returned, unless a close_notify
+/// message has been received. By setting this flag to `true`, instead, the
+/// end-of-stream will be forwarded to the application layer above TLS.
+/// This makes the application vulnerable to truncation attacks unless the
+/// application layer itself verifies that the amount of data received equals
+/// the amount of data expected, such as HTTP with the Content-Length header.
+allow_truncation_attacks: bool = false,
+/// The negotiated cipher suite's keys and secrets for the application
+/// traffic phase of the session.
+application_cipher: tls.ApplicationCipher,
+/// The size is enough to contain exactly one TLSCiphertext record.
+/// This buffer is segmented into four parts:
+/// 0. unused
+/// 1. cleartext
+/// 2. ciphertext
+/// 3. unused
+/// The fields `partial_cleartext_idx`, `partial_ciphertext_idx`, and
+/// `partial_ciphertext_end` describe the span of the segments.
+partially_read_buffer: [tls.max_ciphertext_record_len]u8,
+
+/// This is an example of the type that is needed by the read and write
+/// functions. It can have any fields but it must at least have these
+/// functions.
+///
+/// Note that `std.net.Stream` conforms to this interface.
+///
+/// This declaration serves as documentation only.
+pub const StreamInterface = struct {
+    /// Can be any error set.
+    pub const ReadError = error{};
+
+    /// Returns the number of bytes read. The number read may be less than the
+    /// buffer space provided. End-of-stream is indicated by a return value of 0.
+    ///
+    /// The `iovecs` parameter is mutable so that this function may mutate
+    /// its fields in order to handle partial reads from the underlying
+    /// stream layer.
+    pub fn readv(this: @This(), iovecs: []std.os.iovec) ReadError!usize {
+        _ = .{ this, iovecs };
+        @panic("unimplemented");
+    }
+
+    /// Can be any error set.
+    pub const WriteError = error{};
+
+    /// Returns the number of bytes written, which may be less than the
+    /// buffer space provided. A short write does not indicate end-of-stream.
+    pub fn writev(this: @This(), iovecs: []const std.os.iovec_const) WriteError!usize {
+        _ = .{ this, iovecs };
+        @panic("unimplemented");
+    }
+
+    /// Writes all of the provided data, calling the underlying layer as many
+    /// times as necessary. Returns the number of bytes written.
+    pub fn writevAll(this: @This(), iovecs: []std.os.iovec_const) WriteError!usize {
+        // This can be implemented in terms of writev, or specialized if desired.
+        _ = .{ this, iovecs };
+        @panic("unimplemented");
+    }
+};
+
+/// Initiates a TLS handshake and establishes a TLSv1.3 session with `stream`, which
+/// must conform to `StreamInterface`.
+///
+/// `host` is only borrowed during this function call.
+pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) !Client {
+    const host_len = @intCast(u16, host.len);
+
+    // One CSPRNG call supplies all the randomness the handshake needs:
+    // ClientHello random, legacy session id, and both key-pair seeds.
+    var random_buffer: [128]u8 = undefined;
+    crypto.random.bytes(&random_buffer);
+    const hello_rand = random_buffer[0..32].*;
+    const legacy_session_id = random_buffer[32..64].*;
+    const x25519_kp_seed = random_buffer[64..96].*;
+    const secp256r1_kp_seed = random_buffer[96..128].*;
+
+    const x25519_kp = crypto.dh.X25519.KeyPair.create(x25519_kp_seed) catch |err| switch (err) {
+        // Only possible to happen if the private key is all zeroes.
+        error.IdentityElement => return error.InsufficientEntropy,
+    };
+    const secp256r1_kp = crypto.sign.ecdsa.EcdsaP256Sha256.KeyPair.create(secp256r1_kp_seed) catch |err| switch (err) {
+        // Only possible to happen if the private key is all zeroes.
+        error.IdentityElement => return error.InsufficientEntropy,
+    };
+
+    // Assemble the ClientHello extensions. Everything is built with
+    // fixed-size arrays concatenated at comptime where possible; only the
+    // server_name extension depends on runtime data (`host`), whose bytes
+    // are sent separately as a second iovec below.
+    const extensions_payload =
+        tls.extension(.supported_versions, [_]u8{
+        0x02, // byte length of supported versions
+        0x03, 0x04, // TLS 1.3
+    }) ++ tls.extension(.signature_algorithms, enum_array(tls.SignatureScheme, &.{
+        .ecdsa_secp256r1_sha256,
+        .ecdsa_secp384r1_sha384,
+        .ecdsa_secp521r1_sha512,
+        .rsa_pss_rsae_sha256,
+        .rsa_pss_rsae_sha384,
+        .rsa_pss_rsae_sha512,
+        .rsa_pkcs1_sha256,
+        .rsa_pkcs1_sha384,
+        .rsa_pkcs1_sha512,
+        .ed25519,
+    })) ++ tls.extension(.supported_groups, enum_array(tls.NamedGroup, &.{
+        .secp256r1,
+        .x25519,
+    })) ++ tls.extension(
+        .key_share,
+        array(1, int2(@enumToInt(tls.NamedGroup.x25519)) ++
+            array(1, x25519_kp.public_key) ++
+            int2(@enumToInt(tls.NamedGroup.secp256r1)) ++
+            array(1, secp256r1_kp.public_key.toUncompressedSec1())),
+    ) ++
+        int2(@enumToInt(tls.ExtensionType.server_name)) ++
+        int2(host_len + 5) ++ // byte length of this extension payload
+        int2(host_len + 3) ++ // server_name_list byte count
+        [1]u8{0x00} ++ // name_type
+        int2(host_len);
+
+    const extensions_header =
+        int2(@intCast(u16, extensions_payload.len + host_len)) ++
+        extensions_payload;
+
+    const legacy_compression_methods = 0x0100;
+
+    const client_hello =
+        int2(@enumToInt(tls.ProtocolVersion.tls_1_2)) ++
+        hello_rand ++
+        [1]u8{32} ++ legacy_session_id ++
+        cipher_suites ++
+        int2(legacy_compression_methods) ++
+        extensions_header;
+
+    const out_handshake =
+        [_]u8{@enumToInt(tls.HandshakeType.client_hello)} ++
+        int3(@intCast(u24, client_hello.len + host_len)) ++
+        client_hello;
+
+    const plaintext_header = [_]u8{
+        @enumToInt(tls.ContentType.handshake),
+        0x03, 0x01, // legacy_record_version
+    } ++ int2(@intCast(u16, out_handshake.len + host_len)) ++ out_handshake;
+
+    {
+        // Send the ClientHello: the fixed prefix plus the host name bytes.
+        var iovecs = [_]std.os.iovec_const{
+            .{
+                .iov_base = &plaintext_header,
+                .iov_len = plaintext_header.len,
+            },
+            .{
+                .iov_base = host.ptr,
+                .iov_len = host.len,
+            },
+        };
+        try stream.writevAll(&iovecs);
+    }
+
+    // Skip the 5-byte record header; the transcript hash covers only the
+    // handshake message itself.
+    const client_hello_bytes1 = plaintext_header[5..];
+
+    var handshake_cipher: tls.HandshakeCipher = undefined;
+    var handshake_buffer: [8000]u8 = undefined;
+    var d: tls.Decoder = .{ .buf = &handshake_buffer };
+    {
+        // Read and parse the ServerHello record.
+        try d.readAtLeastOurAmt(stream, tls.record_header_len);
+        const ct = d.decode(tls.ContentType);
+        d.skip(2); // legacy_record_version
+        const record_len = d.decode(u16);
+        try d.readAtLeast(stream, record_len);
+        const server_hello_fragment = d.buf[d.idx..][0..record_len];
+        var ptd = try d.sub(record_len);
+        switch (ct) {
+            .alert => {
+                try ptd.ensure(2);
+                const level = ptd.decode(tls.AlertLevel);
+                const desc = ptd.decode(tls.AlertDescription);
+                _ = level;
+                _ = desc;
+                return error.TlsAlert;
+            },
+            .handshake => {
+                try ptd.ensure(4);
+                const handshake_type = ptd.decode(tls.HandshakeType);
+                if (handshake_type != .server_hello) return error.TlsUnexpectedMessage;
+                const length = ptd.decode(u24);
+                var hsd = try ptd.sub(length);
+                try hsd.ensure(2 + 32 + 1 + 32 + 2 + 1 + 2);
+                const legacy_version = hsd.decode(u16);
+                const random = hsd.array(32);
+                if (mem.eql(u8, random, &tls.hello_retry_request_sequence)) {
+                    // This is a HelloRetryRequest message. This client implementation
+                    // does not expect to get one.
+                    return error.TlsUnexpectedMessage;
+                }
+                const legacy_session_id_echo_len = hsd.decode(u8);
+                if (legacy_session_id_echo_len != 32) return error.TlsIllegalParameter;
+                const legacy_session_id_echo = hsd.array(32);
+                if (!mem.eql(u8, legacy_session_id_echo, &legacy_session_id))
+                    return error.TlsIllegalParameter;
+                const cipher_suite_tag = hsd.decode(tls.CipherSuite);
+                hsd.skip(1); // legacy_compression_method
+                const extensions_size = hsd.decode(u16);
+                var all_extd = try hsd.sub(extensions_size);
+                var supported_version: u16 = 0;
+                var shared_key: [32]u8 = undefined;
+                var have_shared_key = false;
+                // Walk the ServerHello extensions looking for the negotiated
+                // version and the server's key share.
+                while (!all_extd.eof()) {
+                    try all_extd.ensure(2 + 2);
+                    const et = all_extd.decode(tls.ExtensionType);
+                    const ext_size = all_extd.decode(u16);
+                    var extd = try all_extd.sub(ext_size);
+                    switch (et) {
+                        .supported_versions => {
+                            if (supported_version != 0) return error.TlsIllegalParameter;
+                            try extd.ensure(2);
+                            supported_version = extd.decode(u16);
+                        },
+                        .key_share => {
+                            if (have_shared_key) return error.TlsIllegalParameter;
+                            have_shared_key = true;
+                            try extd.ensure(4);
+                            const named_group = extd.decode(tls.NamedGroup);
+                            const key_size = extd.decode(u16);
+                            try extd.ensure(key_size);
+                            switch (named_group) {
+                                .x25519 => {
+                                    if (key_size != 32) return error.TlsIllegalParameter;
+                                    const server_pub_key = extd.array(32);
+
+                                    shared_key = crypto.dh.X25519.scalarmult(
+                                        x25519_kp.secret_key,
+                                        server_pub_key.*,
+                                    ) catch return error.TlsDecryptFailure;
+                                },
+                                .secp256r1 => {
+                                    const server_pub_key = extd.slice(key_size);
+
+                                    const PublicKey = crypto.sign.ecdsa.EcdsaP256Sha256.PublicKey;
+                                    const pk = PublicKey.fromSec1(server_pub_key) catch {
+                                        return error.TlsDecryptFailure;
+                                    };
+                                    const mul = pk.p.mulPublic(secp256r1_kp.secret_key.bytes, .Big) catch {
+                                        return error.TlsDecryptFailure;
+                                    };
+                                    // The ECDHE shared secret is the x-coordinate.
+                                    shared_key = mul.affineCoordinates().x.toBytes(.Big);
+                                },
+                                else => {
+                                    return error.TlsIllegalParameter;
+                                },
+                            }
+                        },
+                        else => {},
+                    }
+                }
+                if (!have_shared_key) return error.TlsIllegalParameter;
+
+                const tls_version = if (supported_version == 0) legacy_version else supported_version;
+                if (tls_version != @enumToInt(tls.ProtocolVersion.tls_1_3))
+                    return error.TlsIllegalParameter;
+
+                switch (cipher_suite_tag) {
+                    inline .AES_128_GCM_SHA256,
+                    .AES_256_GCM_SHA384,
+                    .CHACHA20_POLY1305_SHA256,
+                    .AEGIS_256_SHA384,
+                    .AEGIS_128L_SHA256,
+                    => |tag| {
+                        // Derive the handshake-phase keys from the ECDHE
+                        // shared secret and the transcript hash so far.
+                        const P = std.meta.TagPayloadByName(tls.HandshakeCipher, @tagName(tag));
+                        handshake_cipher = @unionInit(tls.HandshakeCipher, @tagName(tag), .{
+                            .handshake_secret = undefined,
+                            .master_secret = undefined,
+                            .client_handshake_key = undefined,
+                            .server_handshake_key = undefined,
+                            .client_finished_key = undefined,
+                            .server_finished_key = undefined,
+                            .client_handshake_iv = undefined,
+                            .server_handshake_iv = undefined,
+                            .transcript_hash = P.Hash.init(.{}),
+                        });
+                        const p = &@field(handshake_cipher, @tagName(tag));
+                        p.transcript_hash.update(client_hello_bytes1); // Client Hello part 1
+                        p.transcript_hash.update(host); // Client Hello part 2
+                        p.transcript_hash.update(server_hello_fragment);
+                        const hello_hash = p.transcript_hash.peek();
+                        const zeroes = [1]u8{0} ** P.Hash.digest_length;
+                        const early_secret = P.Hkdf.extract(&[1]u8{0}, &zeroes);
+                        const empty_hash = tls.emptyHash(P.Hash);
+                        const hs_derived_secret = hkdfExpandLabel(P.Hkdf, early_secret, "derived", &empty_hash, P.Hash.digest_length);
+                        p.handshake_secret = P.Hkdf.extract(&hs_derived_secret, &shared_key);
+                        const ap_derived_secret = hkdfExpandLabel(P.Hkdf, p.handshake_secret, "derived", &empty_hash, P.Hash.digest_length);
+                        p.master_secret = P.Hkdf.extract(&ap_derived_secret, &zeroes);
+                        const client_secret = hkdfExpandLabel(P.Hkdf, p.handshake_secret, "c hs traffic", &hello_hash, P.Hash.digest_length);
+                        const server_secret = hkdfExpandLabel(P.Hkdf, p.handshake_secret, "s hs traffic", &hello_hash, P.Hash.digest_length);
+                        p.client_finished_key = hkdfExpandLabel(P.Hkdf, client_secret, "finished", "", P.Hmac.key_length);
+                        p.server_finished_key = hkdfExpandLabel(P.Hkdf, server_secret, "finished", "", P.Hmac.key_length);
+                        p.client_handshake_key = hkdfExpandLabel(P.Hkdf, client_secret, "key", "", P.AEAD.key_length);
+                        p.server_handshake_key = hkdfExpandLabel(P.Hkdf, server_secret, "key", "", P.AEAD.key_length);
+                        p.client_handshake_iv = hkdfExpandLabel(P.Hkdf, client_secret, "iv", "", P.AEAD.nonce_length);
+                        p.server_handshake_iv = hkdfExpandLabel(P.Hkdf, server_secret, "iv", "", P.AEAD.nonce_length);
+                    },
+                    else => {
+                        return error.TlsIllegalParameter;
+                    },
+                }
+            },
+            else => return error.TlsUnexpectedMessage,
+        }
+    }
+
+    // This is used for two purposes:
+    // * Detect whether a certificate is the first one presented, in which case
+    //   we need to verify the host name.
+    // * Flip back and forth between the two cleartext buffers in order to keep
+    //   the previous certificate in memory so that it can be verified by the
+    //   next one.
+    var cert_index: usize = 0;
+    // Sequence number for decrypting handshake-phase records from the server.
+    var read_seq: u64 = 0;
+    var prev_cert: Certificate.Parsed = undefined;
+    // State machine for the encrypted handshake messages; reaching
+    // .trust_chain_established means a chain from the first certificate to a
+    // root CA has been verified.
+    const HandshakeState = enum {
+        /// In this state we expect only an encrypted_extensions message.
+        encrypted_extensions,
+        /// In this state we expect certificate messages.
+        certificate,
+        /// In this state we expect certificate or certificate_verify messages.
+        /// certificate messages are ignored since the trust chain is already
+        /// established.
+        trust_chain_established,
+        /// In this state, we expect only the finished message.
+        finished,
+    };
+    var handshake_state: HandshakeState = .encrypted_extensions;
+    var cleartext_bufs: [2][8000]u8 = undefined;
+    var main_cert_pub_key_algo: Certificate.AlgorithmCategory = undefined;
+    var main_cert_pub_key_buf: [300]u8 = undefined;
+    var main_cert_pub_key_len: u16 = undefined;
+    const now_sec = std.time.timestamp();
+
+    // Read and decrypt handshake records until the Finished message
+    // completes the handshake (the .finished arm returns the Client).
+    while (true) {
+        try d.readAtLeastOurAmt(stream, tls.record_header_len);
+        const record_header = d.buf[d.idx..][0..5];
+        const ct = d.decode(tls.ContentType);
+        d.skip(2); // legacy_version
+        const record_len = d.decode(u16);
+        try d.readAtLeast(stream, record_len);
+        var record_decoder = try d.sub(record_len);
+        switch (ct) {
+            .change_cipher_spec => {
+                try record_decoder.ensure(1);
+                if (record_decoder.decode(u8) != 0x01) return error.TlsIllegalParameter;
+            },
+            .application_data => {
+                const cleartext_buf = &cleartext_bufs[cert_index % 2];
+
+                const cleartext = switch (handshake_cipher) {
+                    inline else => |*p| c: {
+                        const P = @TypeOf(p.*);
+                        const ciphertext_len = record_len - P.AEAD.tag_length;
+                        try record_decoder.ensure(ciphertext_len + P.AEAD.tag_length);
+                        const ciphertext = record_decoder.slice(ciphertext_len);
+                        if (ciphertext.len > cleartext_buf.len) return error.TlsRecordOverflow;
+                        const cleartext = cleartext_buf[0..ciphertext.len];
+                        const auth_tag = record_decoder.array(P.AEAD.tag_length).*;
+                        const V = @Vector(P.AEAD.nonce_length, u8);
+                        const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
+                        const operand: V = pad ++ @bitCast([8]u8, big(read_seq));
+                        read_seq += 1;
+                        // Per-record nonce: static IV XOR big-endian sequence number.
+                        const nonce = @as(V, p.server_handshake_iv) ^ operand;
+                        P.AEAD.decrypt(cleartext, ciphertext, auth_tag, record_header, nonce, p.server_handshake_key) catch
+                            return error.TlsBadRecordMac;
+                        break :c cleartext;
+                    },
+                };
+
+                // The real content type is the last byte of the inner plaintext.
+                const inner_ct = @intToEnum(tls.ContentType, cleartext[cleartext.len - 1]);
+                if (inner_ct != .handshake) return error.TlsUnexpectedMessage;
+
+                // One record may contain several coalesced handshake messages.
+                var ctd = tls.Decoder.fromTheirSlice(cleartext[0 .. cleartext.len - 1]);
+                while (true) {
+                    try ctd.ensure(4);
+                    const handshake_type = ctd.decode(tls.HandshakeType);
+                    const handshake_len = ctd.decode(u24);
+                    var hsd = try ctd.sub(handshake_len);
+                    // wrapped_handshake includes the 4-byte message header,
+                    // as required by the transcript hash.
+                    const wrapped_handshake = ctd.buf[ctd.idx - handshake_len - 4 .. ctd.idx];
+                    const handshake = ctd.buf[ctd.idx - handshake_len .. ctd.idx];
+                    switch (handshake_type) {
+                        .encrypted_extensions => {
+                            if (handshake_state != .encrypted_extensions) return error.TlsUnexpectedMessage;
+                            handshake_state = .certificate;
+                            switch (handshake_cipher) {
+                                inline else => |*p| p.transcript_hash.update(wrapped_handshake),
+                            }
+                            try hsd.ensure(2);
+                            const total_ext_size = hsd.decode(u16);
+                            var all_extd = try hsd.sub(total_ext_size);
+                            // Extensions are validated for framing but otherwise ignored.
+                            while (!all_extd.eof()) {
+                                try all_extd.ensure(4);
+                                const et = all_extd.decode(tls.ExtensionType);
+                                const ext_size = all_extd.decode(u16);
+                                var extd = try all_extd.sub(ext_size);
+                                _ = extd;
+                                switch (et) {
+                                    .server_name => {},
+                                    else => {},
+                                }
+                            }
+                        },
+                        .certificate => cert: {
+                            switch (handshake_cipher) {
+                                inline else => |*p| p.transcript_hash.update(wrapped_handshake),
+                            }
+                            switch (handshake_state) {
+                                .certificate => {},
+                                .trust_chain_established => break :cert,
+                                else => return error.TlsUnexpectedMessage,
+                            }
+                            try hsd.ensure(1 + 4);
+                            const cert_req_ctx_len = hsd.decode(u8);
+                            if (cert_req_ctx_len != 0) return error.TlsIllegalParameter;
+                            const certs_size = hsd.decode(u24);
+                            var certs_decoder = try hsd.sub(certs_size);
+                            while (!certs_decoder.eof()) {
+                                try certs_decoder.ensure(3);
+                                const cert_size = certs_decoder.decode(u24);
+                                var certd = try certs_decoder.sub(cert_size);
+
+                                const subject_cert: Certificate = .{
+                                    .buffer = certd.buf,
+                                    .index = @intCast(u32, certd.idx),
+                                };
+                                const subject = try subject_cert.parse();
+                                if (cert_index == 0) {
+                                    // Verify the host on the first certificate.
+                                    try subject.verifyHostName(host);
+
+                                    // Keep track of the public key for the
+                                    // certificate_verify message later.
+                                    main_cert_pub_key_algo = subject.pub_key_algo;
+                                    const pub_key = subject.pubKey();
+                                    if (pub_key.len > main_cert_pub_key_buf.len)
+                                        return error.CertificatePublicKeyInvalid;
+                                    @memcpy(&main_cert_pub_key_buf, pub_key.ptr, pub_key.len);
+                                    main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len);
+                                } else {
+                                    // Each subsequent certificate must be signed
+                                    // by the previous one in the chain.
+                                    try prev_cert.verify(subject, now_sec);
+                                }
+
+                                if (ca_bundle.verify(subject, now_sec)) |_| {
+                                    handshake_state = .trust_chain_established;
+                                    break :cert;
+                                } else |err| switch (err) {
+                                    error.CertificateIssuerNotFound => {},
+                                    else => |e| return e,
+                                }
+
+                                prev_cert = subject;
+                                cert_index += 1;
+
+                                try certs_decoder.ensure(2);
+                                const total_ext_size = certs_decoder.decode(u16);
+                                var all_extd = try certs_decoder.sub(total_ext_size);
+                                _ = all_extd;
+                            }
+                        },
+                        .certificate_verify => {
+                            switch (handshake_state) {
+                                .trust_chain_established => handshake_state = .finished,
+                                .certificate => return error.TlsCertificateNotVerified,
+                                else => return error.TlsUnexpectedMessage,
+                            }
+
+                            try hsd.ensure(4);
+                            const scheme = hsd.decode(tls.SignatureScheme);
+                            const sig_len = hsd.decode(u16);
+                            try hsd.ensure(sig_len);
+                            const encoded_sig = hsd.slice(sig_len);
+                            const max_digest_len = 64;
+                            // Signed content: 64 spaces, context string, NUL,
+                            // then the transcript hash.
+                            var verify_buffer =
+                                ([1]u8{0x20} ** 64) ++
+                                "TLS 1.3, server CertificateVerify\x00".* ++
+                                @as([max_digest_len]u8, undefined);
+
+                            const verify_bytes = switch (handshake_cipher) {
+                                inline else => |*p| v: {
+                                    // peek() first: the transcript for the signature
+                                    // excludes this certificate_verify message itself.
+                                    const transcript_digest = p.transcript_hash.peek();
+                                    verify_buffer[verify_buffer.len - max_digest_len ..][0..transcript_digest.len].* = transcript_digest;
+                                    p.transcript_hash.update(wrapped_handshake);
+                                    break :v verify_buffer[0 .. verify_buffer.len - max_digest_len + transcript_digest.len];
+                                },
+                            };
+                            const main_cert_pub_key = main_cert_pub_key_buf[0..main_cert_pub_key_len];
+
+                            switch (scheme) {
+                                inline .ecdsa_secp256r1_sha256,
+                                .ecdsa_secp384r1_sha384,
+                                => |comptime_scheme| {
+                                    if (main_cert_pub_key_algo != .X9_62_id_ecPublicKey)
+                                        return error.TlsBadSignatureScheme;
+                                    const Ecdsa = SchemeEcdsa(comptime_scheme);
+                                    const sig = try Ecdsa.Signature.fromDer(encoded_sig);
+                                    const key = try Ecdsa.PublicKey.fromSec1(main_cert_pub_key);
+                                    try sig.verify(verify_bytes, key);
+                                },
+                                .rsa_pss_rsae_sha256 => {
+                                    if (main_cert_pub_key_algo != .rsaEncryption)
+                                        return error.TlsBadSignatureScheme;
+
+                                    const Hash = crypto.hash.sha2.Sha256;
+                                    const rsa = Certificate.rsa;
+                                    const components = try rsa.PublicKey.parseDer(main_cert_pub_key);
+                                    const exponent = components.exponent;
+                                    const modulus = components.modulus;
+                                    var rsa_mem_buf: [512 * 32]u8 = undefined;
+                                    var fba = std.heap.FixedBufferAllocator.init(&rsa_mem_buf);
+                                    const ally = fba.allocator();
+                                    switch (modulus.len) {
+                                        inline 128, 256, 512 => |modulus_len| {
+                                            const key = try rsa.PublicKey.fromBytes(exponent, modulus, ally);
+                                            const sig = rsa.PSSSignature.fromBytes(modulus_len, encoded_sig);
+                                            try rsa.PSSSignature.verify(modulus_len, sig, verify_bytes, key, Hash, ally);
+                                        },
+                                        else => {
+                                            return error.TlsBadRsaSignatureBitCount;
+                                        },
+                                    }
+                                },
+                                else => {
+                                    return error.TlsBadSignatureScheme;
+                                },
+                            }
+                        },
+                        .finished => {
+                            if (handshake_state != .finished) return error.TlsUnexpectedMessage;
+                            // This message is to trick buggy proxies into behaving correctly.
+                            const client_change_cipher_spec_msg = [_]u8{
+                                @enumToInt(tls.ContentType.change_cipher_spec),
+                                0x03, 0x03, // legacy protocol version
+                                0x00, 0x01, // length
+                                0x01,
+                            };
+                            const app_cipher = switch (handshake_cipher) {
+                                inline else => |*p, tag| c: {
+                                    const P = @TypeOf(p.*);
+                                    const finished_digest = p.transcript_hash.peek();
+                                    p.transcript_hash.update(wrapped_handshake);
+                                    // Check the server's Finished verify_data before
+                                    // deriving and using the application secrets.
+                                    const expected_server_verify_data = tls.hmac(P.Hmac, &finished_digest, p.server_finished_key);
+                                    if (!mem.eql(u8, &expected_server_verify_data, handshake))
+                                        return error.TlsDecryptError;
+                                    const handshake_hash = p.transcript_hash.finalResult();
+                                    const verify_data = tls.hmac(P.Hmac, &handshake_hash, p.client_finished_key);
+                                    const out_cleartext = [_]u8{
+                                        @enumToInt(tls.HandshakeType.finished),
+                                        0, 0, verify_data.len, // length
+                                    } ++ verify_data ++ [1]u8{@enumToInt(tls.ContentType.handshake)};
+
+                                    const wrapped_len = out_cleartext.len + P.AEAD.tag_length;
+
+                                    var finished_msg = [_]u8{
+                                        @enumToInt(tls.ContentType.application_data),
+                                        0x03, 0x03, // legacy protocol version
+                                        0, wrapped_len, // byte length of encrypted record
+                                    } ++ @as([wrapped_len]u8, undefined);
+
+                                    const ad = finished_msg[0..5];
+                                    const ciphertext = finished_msg[5..][0..out_cleartext.len];
+                                    const auth_tag = finished_msg[finished_msg.len - P.AEAD.tag_length ..];
+                                    // First client record under the handshake keys,
+                                    // so the IV is used without a sequence offset.
+                                    const nonce = p.client_handshake_iv;
+                                    P.AEAD.encrypt(ciphertext, auth_tag, &out_cleartext, ad, nonce, p.client_handshake_key);
+
+                                    const both_msgs = client_change_cipher_spec_msg ++ finished_msg;
+                                    try stream.writeAll(&both_msgs);
+
+                                    const client_secret = hkdfExpandLabel(P.Hkdf, p.master_secret, "c ap traffic", &handshake_hash, P.Hash.digest_length);
+                                    const server_secret = hkdfExpandLabel(P.Hkdf, p.master_secret, "s ap traffic", &handshake_hash, P.Hash.digest_length);
+                                    break :c @unionInit(tls.ApplicationCipher, @tagName(tag), .{
+                                        .client_secret = client_secret,
+                                        .server_secret = server_secret,
+                                        .client_key = hkdfExpandLabel(P.Hkdf, client_secret, "key", "", P.AEAD.key_length),
+                                        .server_key = hkdfExpandLabel(P.Hkdf, server_secret, "key", "", P.AEAD.key_length),
+                                        .client_iv = hkdfExpandLabel(P.Hkdf, client_secret, "iv", "", P.AEAD.nonce_length),
+                                        .server_iv = hkdfExpandLabel(P.Hkdf, server_secret, "iv", "", P.AEAD.nonce_length),
+                                    });
+                                },
+                            };
+                            // Bytes already buffered past the handshake belong to
+                            // the application phase; carry them into the Client.
+                            const leftover = d.rest();
+                            var client: Client = .{
+                                .read_seq = 0,
+                                .write_seq = 0,
+                                .partial_cleartext_idx = 0,
+                                .partial_ciphertext_idx = 0,
+                                .partial_ciphertext_end = @intCast(u15, leftover.len),
+                                .received_close_notify = false,
+                                .application_cipher = app_cipher,
+                                .partially_read_buffer = undefined,
+                            };
+                            mem.copy(u8, &client.partially_read_buffer, leftover);
+                            return client;
+                        },
+                        else => {
+                            return error.TlsUnexpectedMessage;
+                        },
+                    }
+                    if (ctd.eof()) break;
+                }
+            },
+            else => {
+                return error.TlsUnexpectedMessage;
+            },
+        }
+    }
+}
+
+/// Sends TLS-encrypted data to `stream`, which must conform to `StreamInterface`.
+/// Returns the number of plaintext bytes sent, which may be fewer than `bytes.len`.
+pub fn write(c: *Client, stream: anytype, bytes: []const u8) !usize {
+    return c.writeEnd(stream, bytes, false);
+}
+
+/// Sends TLS-encrypted data to `stream`, which must conform to `StreamInterface`.
+/// Loops until every byte of `bytes` has been accepted by `write`.
+pub fn writeAll(c: *Client, stream: anytype, bytes: []const u8) !void {
+    var remaining = bytes;
+    while (remaining.len != 0) {
+        const sent = try c.write(stream, remaining);
+        remaining = remaining[sent..];
+    }
+}
+
+/// Sends TLS-encrypted data to `stream`, which must conform to `StreamInterface`.
+/// If `end` is true, then this function additionally sends a `close_notify` alert,
+/// which is necessary for the server to distinguish between a properly finished
+/// TLS session, or a truncation attack.
+pub fn writeAllEnd(c: *Client, stream: anytype, bytes: []const u8, end: bool) !void {
+    var remaining = bytes;
+    while (remaining.len != 0) {
+        const sent = try c.writeEnd(stream, remaining, end);
+        remaining = remaining[sent..];
+    }
+}
+
+/// Sends TLS-encrypted data to `stream`, which must conform to `StreamInterface`.
+/// Returns the number of plaintext bytes sent, which may be fewer than `bytes.len`.
+/// If `end` is true, then this function additionally sends a `close_notify` alert,
+/// which is necessary for the server to distinguish between a properly finished
+/// TLS session, or a truncation attack.
+pub fn writeEnd(c: *Client, stream: anytype, bytes: []const u8, end: bool) !usize {
+    var ciphertext_buf: [tls.max_ciphertext_record_len * 4]u8 = undefined;
+    var iovecs_buf: [6]std.os.iovec_const = undefined;
+    var prepared = prepareCiphertextRecord(c, &iovecs_buf, &ciphertext_buf, bytes, .application_data);
+    if (end) {
+        // Append the close_notify alert as one more encrypted record.
+        prepared.iovec_end += prepareCiphertextRecord(
+            c,
+            iovecs_buf[prepared.iovec_end..],
+            ciphertext_buf[prepared.ciphertext_end..],
+            &tls.close_notify_alert,
+            .alert,
+        ).iovec_end;
+    }
+
+    const iovec_end = prepared.iovec_end;
+    const overhead_len = prepared.overhead_len;
+
+    // Ideally we would call writev exactly once here, however, we must ensure
+    // that we don't return with a record partially written.
+    var i: usize = 0;
+    var total_amt: usize = 0;
+    while (true) {
+        var amt = try stream.writev(iovecs_buf[i..iovec_end]);
+        while (amt >= iovecs_buf[i].iov_len) {
+            const encrypted_amt = iovecs_buf[i].iov_len;
+            // Report plaintext progress: subtract the per-record overhead.
+            total_amt += encrypted_amt - overhead_len;
+            amt -= encrypted_amt;
+            i += 1;
+            // Rely on the property that iovecs delineate records, meaning that
+            // if amt equals zero here, we have fortunately found ourselves
+            // with a short read that aligns at the record boundary.
+            if (i >= iovec_end) return total_amt;
+            // We also cannot return on a vector boundary if the final close_notify is
+            // not sent; otherwise the caller would not know to retry the call.
+            if (amt == 0 and (!end or i < iovec_end - 1)) return total_amt;
+        }
+        // Partial write inside a record: advance the iovec and retry.
+        iovecs_buf[i].iov_base += amt;
+        iovecs_buf[i].iov_len -= amt;
+    }
+}
+
+/// Encrypts `bytes` into one or more TLSCiphertext records stored in
+/// `ciphertext_buf`, appending one iovec entry per record. Stops when either
+/// `bytes` is exhausted or `ciphertext_buf` fills up, always reserving room
+/// for a final close_notify alert record.
+fn prepareCiphertextRecord(
+    c: *Client,
+    iovecs: []std.os.iovec_const,
+    ciphertext_buf: []u8,
+    bytes: []const u8,
+    inner_content_type: tls.ContentType,
+) struct {
+    iovec_end: usize,
+    ciphertext_end: usize,
+    /// How many bytes are taken up by overhead per record.
+    overhead_len: usize,
+} {
+    // Due to the trailing inner content type byte in the ciphertext, we need
+    // an additional buffer for storing the cleartext into before encrypting.
+    var cleartext_buf: [max_ciphertext_len]u8 = undefined;
+    var ciphertext_end: usize = 0;
+    var iovec_end: usize = 0;
+    var bytes_i: usize = 0;
+    switch (c.application_cipher) {
+        inline else => |*p| {
+            const P = @TypeOf(p.*);
+            const V = @Vector(P.AEAD.nonce_length, u8);
+            // Per record: 5-byte header + AEAD tag + 1 inner content type byte.
+            const overhead_len = tls.record_header_len + P.AEAD.tag_length + 1;
+            const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len;
+            while (true) {
+                // Take as much as fits in one record and in the remaining
+                // buffer space (minus the close_notify reservation).
+                const encrypted_content_len = @intCast(u16, @min(
+                    @min(bytes.len - bytes_i, max_ciphertext_len - 1),
+                    ciphertext_buf.len - close_notify_alert_reserved -
+                        overhead_len - ciphertext_end,
+                ));
+                if (encrypted_content_len == 0) return .{
+                    .iovec_end = iovec_end,
+                    .ciphertext_end = ciphertext_end,
+                    .overhead_len = overhead_len,
+                };
+
+                mem.copy(u8, &cleartext_buf, bytes[bytes_i..][0..encrypted_content_len]);
+                // TLSInnerPlaintext: content followed by the real content type.
+                cleartext_buf[encrypted_content_len] = @enumToInt(inner_content_type);
+                bytes_i += encrypted_content_len;
+                const ciphertext_len = encrypted_content_len + 1;
+                const cleartext = cleartext_buf[0..ciphertext_len];
+
+                const record_start = ciphertext_end;
+                // The record header doubles as the AEAD associated data.
+                const ad = ciphertext_buf[ciphertext_end..][0..5];
+                ad.* =
+                    [_]u8{@enumToInt(tls.ContentType.application_data)} ++
+                    int2(@enumToInt(tls.ProtocolVersion.tls_1_2)) ++
+                    int2(ciphertext_len + P.AEAD.tag_length);
+                ciphertext_end += ad.len;
+                const ciphertext = ciphertext_buf[ciphertext_end..][0..ciphertext_len];
+                ciphertext_end += ciphertext_len;
+                const auth_tag = ciphertext_buf[ciphertext_end..][0..P.AEAD.tag_length];
+                ciphertext_end += auth_tag.len;
+                // Per-record nonce: static IV XOR big-endian sequence number.
+                const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
+                const operand: V = pad ++ @bitCast([8]u8, big(c.write_seq));
+                c.write_seq += 1; // TODO send key_update on overflow
+                const nonce = @as(V, p.client_iv) ^ operand;
+                P.AEAD.encrypt(ciphertext, auth_tag, cleartext, ad, nonce, p.client_key);
+
+                const record = ciphertext_buf[record_start..ciphertext_end];
+                iovecs[iovec_end] = .{
+                    .iov_base = record.ptr,
+                    .iov_len = record.len,
+                };
+                iovec_end += 1;
+            }
+        },
+    }
+}
+
+/// Returns true when the TLS stream has cleanly ended: a close_notify alert
+/// has been received and no buffered cleartext or ciphertext remains in
+/// `partially_read_buffer`.
+pub fn eof(c: Client) bool {
+    if (!c.received_close_notify) return false;
+    if (c.partial_cleartext_idx < c.partial_ciphertext_idx) return false;
+    return c.partial_ciphertext_idx >= c.partial_ciphertext_end;
+}
+
+/// Receives TLS-encrypted data from `stream`, which must conform to `StreamInterface`.
+/// Returns the number of bytes read, calling the underlying read function the
+/// minimal number of times until the buffer has at least `len` bytes filled.
+/// If the number read is less than `len` it means the stream reached the end.
+/// Reaching the end of the stream is not an error condition.
+pub fn readAtLeast(c: *Client, stream: anytype, buffer: []u8, len: usize) !usize {
+    var iov_storage: [1]std.os.iovec = .{.{ .iov_base = buffer.ptr, .iov_len = buffer.len }};
+    return readvAtLeast(c, stream, &iov_storage, len);
+}
+
+/// Receives TLS-encrypted data from `stream`, which must conform to `StreamInterface`.
+/// Blocks until at least one byte is available (or end-of-stream).
+pub fn read(c: *Client, stream: anytype, buffer: []u8) !usize {
+    return c.readAtLeast(stream, buffer, 1);
+}
+
+/// Receives TLS-encrypted data from `stream`, which must conform to `StreamInterface`.
+/// Returns the number of bytes read. If the number read is smaller than
+/// `buffer.len`, it means the stream reached the end. Reaching the end of the
+/// stream is not an error condition.
+pub fn readAll(c: *Client, stream: anytype, buffer: []u8) !usize {
+    return c.readAtLeast(stream, buffer, buffer.len);
+}
+
+/// Receives TLS-encrypted data from `stream`, which must conform to `StreamInterface`.
+/// Returns the number of bytes read. If the number read is less than the space
+/// provided it means the stream reached the end. Reaching the end of the
+/// stream is not an error condition.
+/// The `iovecs` parameter is mutable because this function needs to mutate the fields in
+/// order to handle partial reads from the underlying stream layer.
+pub fn readv(c: *Client, stream: anytype, iovecs: []std.os.iovec) !usize {
+    // A minimum of 1 byte makes this behave like the other read functions:
+    // block until some data is available, then return as soon as possible.
+    return readvAtLeast(c, stream, iovecs, 1);
+}
+
+/// Receives TLS-encrypted data from `stream`, which must conform to `StreamInterface`.
+/// Returns the number of bytes read, calling the underlying read function the
+/// minimal number of times until the iovecs have at least `len` bytes filled.
+/// If the number read is less than `len` it means the stream reached the end.
+/// Reaching the end of the stream is not an error condition.
+/// The `iovecs` parameter is mutable because this function needs to mutate the fields in
+/// order to handle partial reads from the underlying stream layer.
+pub fn readvAtLeast(c: *Client, stream: anytype, iovecs: []std.os.iovec, len: usize) !usize {
+    if (c.eof()) return 0;
+
+    var off_i: usize = 0;
+    var vec_i: usize = 0;
+    while (true) {
+        var amt = try c.readvAdvanced(stream, iovecs[vec_i..]);
+        off_i += amt;
+        if (c.eof() or off_i >= len) return off_i;
+        // Skip past any iovec entries that were completely filled.
+        while (amt >= iovecs[vec_i].iov_len) {
+            amt -= iovecs[vec_i].iov_len;
+            vec_i += 1;
+        }
+        // Shrink the partially-filled entry so the next read appends to it.
+        iovecs[vec_i].iov_base += amt;
+        iovecs[vec_i].iov_len -= amt;
+    }
+}
+
/// Receives TLS-encrypted data from `stream`, which must conform to `StreamInterface`.
/// Returns number of bytes that have been read, populated inside `iovecs`. A
/// return value of zero bytes does not mean end of stream. Instead, check the `eof()`
/// for the end of stream. The `eof()` may be true after any call to
/// `read`, including when greater than zero bytes are returned, and this
/// function asserts that `eof()` is `false`.
/// See `readv` for a higher level function that has the same, familiar API as
/// other read functions, such as `std.fs.File.read`.
pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) !usize {
    var vp: VecPut = .{ .iovecs = iovecs };

    // Give away the buffered cleartext we have, if any.
    const partial_cleartext = c.partially_read_buffer[c.partial_cleartext_idx..c.partial_ciphertext_idx];
    if (partial_cleartext.len > 0) {
        const amt = @intCast(u15, vp.put(partial_cleartext));
        c.partial_cleartext_idx += amt;
        if (amt < partial_cleartext.len) {
            // We still have cleartext left so we cannot issue another read() call yet.
            assert(vp.total == amt);
            return amt;
        }
        if (c.received_close_notify) {
            // close_notify already received: no further stream reads will occur.
            c.partial_ciphertext_end = 0;
            assert(vp.total == amt);
            return amt;
        }
        if (c.partial_ciphertext_end == c.partial_ciphertext_idx) {
            // Everything buffered has been consumed; reset to the empty state.
            c.partial_cleartext_idx = 0;
            c.partial_ciphertext_idx = 0;
            c.partial_ciphertext_end = 0;
        }
    }

    assert(!c.received_close_notify);

    // Ideally, this buffer would never be used. It is needed when `iovecs` are
    // too small to fit the cleartext, which may be as large as `max_ciphertext_len`.
    var cleartext_stack_buffer: [max_ciphertext_len]u8 = undefined;
    // Temporarily stores ciphertext before decrypting it and giving it to `iovecs`.
    var in_stack_buffer: [max_ciphertext_len * 4]u8 = undefined;
    // How many bytes left in the user's buffer.
    const free_size = vp.freeSize();
    // The amount of the user's buffer that we need to repurpose for storing
    // ciphertext. The end of the buffer will be used for such purposes.
    const ciphertext_buf_len = (free_size / 2) -| in_stack_buffer.len;
    // The amount of the user's buffer that will be used to give cleartext. The
    // beginning of the buffer will be used for such purposes.
    const cleartext_buf_len = free_size - ciphertext_buf_len;
    const first_iov = c.partially_read_buffer[c.partial_ciphertext_end..];

    var ask_iovecs_buf: [2]std.os.iovec = .{
        .{
            .iov_base = first_iov.ptr,
            .iov_len = first_iov.len,
        },
        .{
            .iov_base = &in_stack_buffer,
            .iov_len = in_stack_buffer.len,
        },
    };

    // Cleartext capacity of output buffer, in records, rounded up.
    const buf_cap = (cleartext_buf_len +| (max_ciphertext_len - 1)) / max_ciphertext_len;
    const wanted_read_len = buf_cap * (max_ciphertext_len + tls.record_header_len);
    const ask_len = @max(wanted_read_len, cleartext_stack_buffer.len);
    const ask_iovecs = limitVecs(&ask_iovecs_buf, ask_len);
    const actual_read_len = try stream.readv(ask_iovecs);
    if (actual_read_len == 0) {
        // This is either a truncation attack, a bug in the server, or an
        // intentional omission of the close_notify message due to truncation
        // detection handled above the TLS layer.
        if (c.allow_truncation_attacks) {
            c.received_close_notify = true;
        } else {
            return error.TlsConnectionTruncated;
        }
    }

    // There might be more bytes inside `in_stack_buffer` that need to be processed,
    // but at least frag0 will have one complete ciphertext record.
    const frag0_end = @min(c.partially_read_buffer.len, c.partial_ciphertext_end + actual_read_len);
    const frag0 = c.partially_read_buffer[c.partial_ciphertext_idx..frag0_end];
    var frag1 = in_stack_buffer[0..actual_read_len -| first_iov.len];
    // We need to decipher frag0 and frag1 but there may be a ciphertext record
    // straddling the boundary. We can handle this with two memcpy() calls to
    // assemble the straddling record in between handling the two sides.
    var frag = frag0;
    var in: usize = 0;
    while (true) {
        if (in == frag.len) {
            // Perfect split.
            if (frag.ptr == frag1.ptr) {
                // Both fragments fully processed; nothing left to save.
                c.partial_ciphertext_end = c.partial_ciphertext_idx;
                return vp.total;
            }
            frag = frag1;
            in = 0;
            continue;
        }

        // The TLS record header is 5 bytes: content type (1), legacy
        // version (2), and big-endian payload length (2).
        if (in + tls.record_header_len > frag.len) {
            if (frag.ptr == frag1.ptr)
                return finishRead(c, frag, in, vp.total);

            const first = frag[in..];

            if (frag1.len < tls.record_header_len)
                return finishRead2(c, first, frag1, vp.total);

            // A record straddles the two fragments. Copy into the now-empty first fragment.
            // Offsets in+3 and in+4 are the two length bytes of the header.
            const record_len_byte_0: u16 = straddleByte(frag, frag1, in + 3);
            const record_len_byte_1: u16 = straddleByte(frag, frag1, in + 4);
            const record_len = (record_len_byte_0 << 8) | record_len_byte_1;
            if (record_len > max_ciphertext_len) return error.TlsRecordOverflow;

            const full_record_len = record_len + tls.record_header_len;
            const second_len = full_record_len - first.len;
            if (frag1.len < second_len)
                return finishRead2(c, first, frag1, vp.total);

            // NOTE(review): mem.copy asserts dest.len >= source.len, so these
            // two copies rely on `in >= first.len` and `frag.len - first.len >=
            // second_len` respectively — confirm these invariants hold for very
            // small reads (e.g. `in == 0` with a tiny frag0).
            mem.copy(u8, frag[0..in], first);
            mem.copy(u8, frag[first.len..], frag1[0..second_len]);
            frag = frag[0..full_record_len];
            frag1 = frag1[second_len..];
            in = 0;
            continue;
        }
        // Parse the complete 5-byte record header in place.
        const ct = @intToEnum(tls.ContentType, frag[in]);
        in += 1;
        const legacy_version = mem.readIntBig(u16, frag[in..][0..2]);
        in += 2;
        _ = legacy_version;
        const record_len = mem.readIntBig(u16, frag[in..][0..2]);
        if (record_len > max_ciphertext_len) return error.TlsRecordOverflow;
        in += 2;
        const end = in + record_len;
        if (end > frag.len) {
            // We need the record header on the next iteration of the loop.
            in -= tls.record_header_len;

            if (frag.ptr == frag1.ptr)
                return finishRead(c, frag, in, vp.total);

            // A record straddles the two fragments. Copy into the now-empty first fragment.
            const first = frag[in..];
            const full_record_len = record_len + tls.record_header_len;
            const second_len = full_record_len - first.len;
            if (frag1.len < second_len)
                return finishRead2(c, first, frag1, vp.total);

            // NOTE(review): same dest-length invariants as the straddle copy above.
            mem.copy(u8, frag[0..in], first);
            mem.copy(u8, frag[first.len..], frag1[0..second_len]);
            frag = frag[0..full_record_len];
            frag1 = frag1[second_len..];
            in = 0;
            continue;
        }
        switch (ct) {
            .alert => {
                if (in + 2 > frag.len) return error.TlsDecodeError;
                const level = @intToEnum(tls.AlertLevel, frag[in]);
                const desc = @intToEnum(tls.AlertDescription, frag[in + 1]);
                _ = level;
                _ = desc;
                // Any plaintext alert at this stage is treated as fatal.
                return error.TlsAlert;
            },
            .application_data => {
                const cleartext = switch (c.application_cipher) {
                    inline else => |*p| c: {
                        const P = @TypeOf(p.*);
                        const V = @Vector(P.AEAD.nonce_length, u8);
                        // The 5-byte record header is the additional authenticated data.
                        const ad = frag[in - 5 ..][0..5];
                        const ciphertext_len = record_len - P.AEAD.tag_length;
                        const ciphertext = frag[in..][0..ciphertext_len];
                        in += ciphertext_len;
                        const auth_tag = frag[in..][0..P.AEAD.tag_length].*;
                        // Per-record nonce: static server IV XOR the left-padded,
                        // big-endian read sequence number.
                        const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
                        const operand: V = pad ++ @bitCast([8]u8, big(c.read_seq));
                        const nonce: [P.AEAD.nonce_length]u8 = @as(V, p.server_iv) ^ operand;
                        // Decrypt directly into the caller's buffer when the
                        // record fits; otherwise fall back to the stack buffer
                        // and copy afterwards.
                        const out_buf = vp.peek();
                        const cleartext_buf = if (ciphertext.len <= out_buf.len)
                            out_buf
                        else
                            &cleartext_stack_buffer;
                        const cleartext = cleartext_buf[0..ciphertext.len];
                        P.AEAD.decrypt(cleartext, ciphertext, auth_tag, ad, nonce, p.server_key) catch
                            return error.TlsBadRecordMac;
                        break :c cleartext;
                    },
                };

                // Overflow here would mean nonce reuse; fail instead.
                c.read_seq = try std.math.add(u64, c.read_seq, 1);

                // TLS 1.3 places the real content type as the final plaintext byte.
                const inner_ct = @intToEnum(tls.ContentType, cleartext[cleartext.len - 1]);
                switch (inner_ct) {
                    .alert => {
                        const level = @intToEnum(tls.AlertLevel, cleartext[0]);
                        const desc = @intToEnum(tls.AlertDescription, cleartext[1]);
                        if (desc == .close_notify) {
                            c.received_close_notify = true;
                            c.partial_ciphertext_end = c.partial_ciphertext_idx;
                            return vp.total;
                        }
                        _ = level;
                        return error.TlsAlert;
                    },
                    .handshake => {
                        // Post-handshake messages; a record may carry several.
                        var ct_i: usize = 0;
                        while (true) {
                            const handshake_type = @intToEnum(tls.HandshakeType, cleartext[ct_i]);
                            ct_i += 1;
                            const handshake_len = mem.readIntBig(u24, cleartext[ct_i..][0..3]);
                            ct_i += 3;
                            const next_handshake_i = ct_i + handshake_len;
                            if (next_handshake_i > cleartext.len - 1)
                                return error.TlsBadLength;
                            const handshake = cleartext[ct_i..next_handshake_i];
                            switch (handshake_type) {
                                .new_session_ticket => {
                                    // This client implementation ignores new session tickets.
                                },
                                .key_update => {
                                    // Ratchet the server-to-client traffic keys forward.
                                    switch (c.application_cipher) {
                                        inline else => |*p| {
                                            const P = @TypeOf(p.*);
                                            const server_secret = hkdfExpandLabel(P.Hkdf, p.server_secret, "traffic upd", "", P.Hash.digest_length);
                                            p.server_secret = server_secret;
                                            p.server_key = hkdfExpandLabel(P.Hkdf, server_secret, "key", "", P.AEAD.key_length);
                                            p.server_iv = hkdfExpandLabel(P.Hkdf, server_secret, "iv", "", P.AEAD.nonce_length);
                                        },
                                    }
                                    c.read_seq = 0;

                                    switch (@intToEnum(tls.KeyUpdateRequest, handshake[0])) {
                                        .update_requested => {
                                            // Server asked us to rotate our sending keys too.
                                            switch (c.application_cipher) {
                                                inline else => |*p| {
                                                    const P = @TypeOf(p.*);
                                                    const client_secret = hkdfExpandLabel(P.Hkdf, p.client_secret, "traffic upd", "", P.Hash.digest_length);
                                                    p.client_secret = client_secret;
                                                    p.client_key = hkdfExpandLabel(P.Hkdf, client_secret, "key", "", P.AEAD.key_length);
                                                    p.client_iv = hkdfExpandLabel(P.Hkdf, client_secret, "iv", "", P.AEAD.nonce_length);
                                                },
                                            }
                                            c.write_seq = 0;
                                        },
                                        .update_not_requested => {},
                                        _ => return error.TlsIllegalParameter,
                                    }
                                },
                                else => {
                                    return error.TlsUnexpectedMessage;
                                },
                            }
                            ct_i = next_handshake_i;
                            if (ct_i >= cleartext.len - 1) break;
                        }
                    },
                    .application_data => {
                        // Determine whether the output buffer or a stack
                        // buffer was used for storing the cleartext.
                        if (cleartext.ptr == &cleartext_stack_buffer) {
                            // Stack buffer was used, so we must copy to the output buffer.
                            const msg = cleartext[0 .. cleartext.len - 1];
                            if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
                                // We have already run out of room in iovecs. Continue
                                // appending to `partially_read_buffer`.
                                const dest = c.partially_read_buffer[c.partial_ciphertext_idx..];
                                mem.copy(u8, dest, msg);
                                c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), c.partial_ciphertext_idx + msg.len);
                            } else {
                                const amt = vp.put(msg);
                                if (amt < msg.len) {
                                    // Stash whatever did not fit for the next call.
                                    const rest = msg[amt..];
                                    c.partial_cleartext_idx = 0;
                                    c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), rest.len);
                                    mem.copy(u8, &c.partially_read_buffer, rest);
                                }
                            }
                        } else {
                            // Output buffer was used directly which means no
                            // memory copying needs to occur, and we can move
                            // on to the next ciphertext record.
                            vp.next(cleartext.len - 1);
                        }
                    },
                    else => {
                        return error.TlsUnexpectedMessage;
                    },
                }
            },
            else => {
                return error.TlsUnexpectedMessage;
            },
        }
        in = end;
    }
}
+
/// Stashes the unprocessed ciphertext remainder `frag[in..]` into
/// `c.partially_read_buffer` so the next call can resume parsing there, then
/// returns `out` (the number of cleartext bytes already delivered).
fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize {
    const saved_buf = frag[in..];
    if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
        // There is cleartext at the beginning already which we need to preserve.
        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + saved_buf.len);
        mem.copy(u8, c.partially_read_buffer[c.partial_ciphertext_idx..], saved_buf);
    } else {
        // No buffered cleartext; restart the buffer from the beginning.
        c.partial_cleartext_idx = 0;
        c.partial_ciphertext_idx = 0;
        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), saved_buf.len);
        mem.copy(u8, &c.partially_read_buffer, saved_buf);
    }
    return out;
}
+
/// Same as `finishRead`, but the unprocessed ciphertext is split across two
/// fragments (`first` then `frag1`); both are appended, in order, to
/// `c.partially_read_buffer`. Returns `out`, the cleartext byte count already
/// delivered to the caller.
fn finishRead2(c: *Client, first: []const u8, frag1: []const u8, out: usize) usize {
    if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
        // There is cleartext at the beginning already which we need to preserve.
        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + first.len + frag1.len);
        mem.copy(u8, c.partially_read_buffer[c.partial_ciphertext_idx..], first);
        mem.copy(u8, c.partially_read_buffer[c.partial_ciphertext_idx + first.len ..], frag1);
    } else {
        // No buffered cleartext; restart the buffer from the beginning.
        c.partial_cleartext_idx = 0;
        c.partial_ciphertext_idx = 0;
        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), first.len + frag1.len);
        mem.copy(u8, &c.partially_read_buffer, first);
        mem.copy(u8, c.partially_read_buffer[first.len..], frag1);
    }
    return out;
}
+
/// Reads the byte at `index` from the logical concatenation `s1 ++ s2`,
/// without materializing the concatenation.
fn straddleByte(s1: []const u8, s2: []const u8, index: usize) u8 {
    return if (index < s1.len) s1[index] else s2[index - s1.len];
}
+
+const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
/// Converts `x` from native byte order to big-endian (network) byte order.
/// A no-op on big-endian targets; a byte swap on little-endian targets.
inline fn big(x: anytype) @TypeOf(x) {
    return mem.nativeToBig(@TypeOf(x), x);
}
+
/// Maps a TLS ECDSA signature scheme to the corresponding std.crypto signature
/// type. Compile error for any scheme without a supported implementation.
fn SchemeEcdsa(comptime scheme: tls.SignatureScheme) type {
    return switch (scheme) {
        .ecdsa_secp256r1_sha256 => crypto.sign.ecdsa.EcdsaP256Sha256,
        .ecdsa_secp384r1_sha384 => crypto.sign.ecdsa.EcdsaP384Sha384,
        // NOTE(review): secp521r1 is the P-521 curve, but this maps it to
        // `EcdsaP512Sha512` ("P512"). Confirm this declaration actually exists
        // in std.crypto.sign.ecdsa — it looks like a typo for a P-521 type.
        .ecdsa_secp521r1_sha512 => crypto.sign.ecdsa.EcdsaP512Sha512,
        else => @compileError("bad scheme"),
    };
}
+
/// Abstraction for sending multiple byte buffers to a slice of iovecs.
/// Maintains a write cursor (`idx`, `off`) over the iovec list and a running
/// byte count (`total`).
const VecPut = struct {
    iovecs: []const std.os.iovec,
    // Index of the iovec currently being filled.
    idx: usize = 0,
    // Offset of the next free byte within `iovecs[idx]`.
    off: usize = 0,
    // Total number of bytes written through this VecPut so far.
    total: usize = 0,

    /// Returns the amount actually put which is always equal to bytes.len
    /// unless the vectors ran out of space.
    fn put(vp: *VecPut, bytes: []const u8) usize {
        var bytes_i: usize = 0;
        while (true) {
            const v = vp.iovecs[vp.idx];
            const dest = v.iov_base[vp.off..v.iov_len];
            // Copy as much as fits into the current iovec.
            const src = bytes[bytes_i..][0..@min(dest.len, bytes.len - bytes_i)];
            mem.copy(u8, dest, src);
            bytes_i += src.len;
            vp.off += src.len;
            if (vp.off >= v.iov_len) {
                // Current iovec is full; advance to the next one.
                vp.off = 0;
                vp.idx += 1;
                if (vp.idx >= vp.iovecs.len) {
                    // Ran out of iovecs; report the partial amount.
                    vp.total += bytes_i;
                    return bytes_i;
                }
            }
            if (bytes_i >= bytes.len) {
                vp.total += bytes_i;
                return bytes_i;
            }
        }
    }

    /// Returns the next buffer that consecutive bytes can go into.
    fn peek(vp: VecPut) []u8 {
        if (vp.idx >= vp.iovecs.len) return &.{};
        const v = vp.iovecs[vp.idx];
        return v.iov_base[vp.off..v.iov_len];
    }

    // After writing to the result of peek(), one can call next() to
    // advance the cursor. `len` must not exceed the length of the slice
    // returned by peek().
    fn next(vp: *VecPut, len: usize) void {
        vp.total += len;
        vp.off += len;
        if (vp.off >= vp.iovecs[vp.idx].iov_len) {
            vp.off = 0;
            vp.idx += 1;
        }
    }

    /// Returns the remaining capacity, in bytes, from the cursor to the end
    /// of all iovecs.
    fn freeSize(vp: VecPut) usize {
        if (vp.idx >= vp.iovecs.len) return 0;
        var total: usize = 0;
        total += vp.iovecs[vp.idx].iov_len - vp.off;
        if (vp.idx + 1 >= vp.iovecs.len) return total;
        for (vp.iovecs[vp.idx + 1 ..]) |v| total += v.iov_len;
        return total;
    }
};
+
/// Limit iovecs to a specific byte size. Mutates the `iov_len` of the iovec
/// that straddles the limit. Returns the prefix of `iovecs` whose total
/// capacity is at most `len` (all of `iovecs`, unchanged, if their combined
/// capacity is smaller than `len`).
fn limitVecs(iovecs: []std.os.iovec, len: usize) []std.os.iovec {
    var bytes_left: usize = len;
    var vec_i: usize = 0;
    while (vec_i < iovecs.len) : (vec_i += 1) {
        if (bytes_left <= iovecs[vec_i].iov_len) {
            // The limit falls within this iovec: truncate it and include it in
            // the returned slice. The original code returned `iovecs[0..vec_i]`
            // here, wrongly excluding the just-truncated iovec — yielding an
            // empty slice (and thus a zero-byte readv, misdiagnosed as
            // truncation) whenever `len` fell inside the first iovec.
            iovecs[vec_i].iov_len = bytes_left;
            return iovecs[0 .. vec_i + 1];
        }
        bytes_left -= iovecs[vec_i].iov_len;
    }
    // Combined capacity is less than `len`; use every iovec unchanged.
    return iovecs;
}
+
/// The priority order here is chosen based on what crypto algorithms Zig has
/// available in the standard library as well as what is faster. Following are
/// a few data points on the relative performance of these algorithms.
///
/// Measurement taken with 0.11.0-dev.810+c2f5848fe
/// on x86_64-linux Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz:
/// zig run ./lib/std/crypto/benchmark.zig -OReleaseFast
///       aegis-128l:      15382 MiB/s
///        aegis-256:       9553 MiB/s
///       aes128-gcm:       3721 MiB/s
///       aes256-gcm:       3010 MiB/s
/// chacha20Poly1305:        597 MiB/s
///
/// Measurement taken with 0.11.0-dev.810+c2f5848fe
/// on x86_64-linux Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz:
/// zig run ./lib/std/crypto/benchmark.zig -OReleaseFast -mcpu=baseline
///       aegis-128l:        629 MiB/s
/// chacha20Poly1305:        529 MiB/s
///        aegis-256:        461 MiB/s
///       aes128-gcm:        138 MiB/s
///       aes256-gcm:        120 MiB/s
///
/// Listed most-preferred first; this order is offered to the server verbatim.
const cipher_suites = enum_array(tls.CipherSuite, &.{
    .AEGIS_128L_SHA256,
    .AEGIS_256_SHA384,
    .AES_128_GCM_SHA256,
    .AES_256_GCM_SHA384,
    .CHACHA20_POLY1305_SHA256,
});
+
test {
    // Referencing StreamInterface ensures its comptime checks are exercised
    // when this file's tests are run.
    _ = StreamInterface;
}
diff --git a/lib/std/crypto/utils.zig b/lib/std/crypto/utils.zig
index 0a3540d895..fd7264e737 100644
--- a/lib/std/crypto/utils.zig
+++ b/lib/std/crypto/utils.zig
@@ -87,15 +87,19 @@ pub fn timingSafeAdd(comptime T: type, a: []const T, b: []const T, result: []T,
if (endian == .Little) {
var i: usize = 0;
while (i < len) : (i += 1) {
- const tmp = @boolToInt(@addWithOverflow(u8, a[i], b[i], &result[i]));
- carry = tmp | @boolToInt(@addWithOverflow(u8, result[i], carry, &result[i]));
+ const ov1 = @addWithOverflow(a[i], b[i]);
+ const ov2 = @addWithOverflow(ov1[0], carry);
+ result[i] = ov2[0];
+ carry = ov1[1] | ov2[1];
}
} else {
var i: usize = len;
while (i != 0) {
i -= 1;
- const tmp = @boolToInt(@addWithOverflow(u8, a[i], b[i], &result[i]));
- carry = tmp | @boolToInt(@addWithOverflow(u8, result[i], carry, &result[i]));
+ const ov1 = @addWithOverflow(a[i], b[i]);
+ const ov2 = @addWithOverflow(ov1[0], carry);
+ result[i] = ov2[0];
+ carry = ov1[1] | ov2[1];
}
}
return @bitCast(bool, carry);
@@ -110,15 +114,19 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
if (endian == .Little) {
var i: usize = 0;
while (i < len) : (i += 1) {
- const tmp = @boolToInt(@subWithOverflow(u8, a[i], b[i], &result[i]));
- borrow = tmp | @boolToInt(@subWithOverflow(u8, result[i], borrow, &result[i]));
+ const ov1 = @subWithOverflow(a[i], b[i]);
+ const ov2 = @subWithOverflow(ov1[0], borrow);
+ result[i] = ov2[0];
+ borrow = ov1[1] | ov2[1];
}
} else {
var i: usize = len;
while (i != 0) {
i -= 1;
- const tmp = @boolToInt(@subWithOverflow(u8, a[i], b[i], &result[i]));
- borrow = tmp | @boolToInt(@subWithOverflow(u8, result[i], borrow, &result[i]));
+ const ov1 = @subWithOverflow(a[i], b[i]);
+ const ov2 = @subWithOverflow(ov1[0], borrow);
+ result[i] = ov2[0];
+ borrow = ov1[1] | ov2[1];
}
}
return @bitCast(bool, borrow);
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index c48324422d..5bfac5bcb7 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -109,17 +109,24 @@ pub fn getSelfDebugInfo() !*DebugInfo {
}
}
-pub fn detectTTYConfig() TTY.Config {
+pub fn detectTTYConfig(file: std.fs.File) TTY.Config {
if (process.hasEnvVarConstant("ZIG_DEBUG_COLOR")) {
return .escape_codes;
} else if (process.hasEnvVarConstant("NO_COLOR")) {
return .no_color;
} else {
- const stderr_file = io.getStdErr();
- if (stderr_file.supportsAnsiEscapeCodes()) {
+ if (file.supportsAnsiEscapeCodes()) {
return .escape_codes;
- } else if (native_os == .windows and stderr_file.isTty()) {
- return .{ .windows_api = stderr_file };
+ } else if (native_os == .windows and file.isTty()) {
+ var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
+ if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) {
+ // TODO: Should this return an error instead?
+ return .no_color;
+ }
+ return .{ .windows_api = .{
+ .handle = file.handle,
+ .reset_attributes = info.wAttributes,
+ } };
} else {
return .no_color;
}
@@ -146,7 +153,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
- writeCurrentStackTrace(stderr, debug_info, detectTTYConfig(), start_addr) catch |err| {
+ writeCurrentStackTrace(stderr, debug_info, detectTTYConfig(io.getStdErr()), start_addr) catch |err| {
stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
return;
};
@@ -174,7 +181,7 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
- const tty_config = detectTTYConfig();
+ const tty_config = detectTTYConfig(io.getStdErr());
printSourceAtAddress(debug_info, stderr, ip, tty_config) catch return;
var it = StackIterator.init(null, bp);
while (it.next()) |return_address| {
@@ -257,7 +264,7 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
- writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, detectTTYConfig()) catch |err| {
+ writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, detectTTYConfig(io.getStdErr())) catch |err| {
stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
return;
};
@@ -600,7 +607,12 @@ pub const TTY = struct {
pub const Config = union(enum) {
no_color,
escape_codes,
- windows_api: File,
+ windows_api: if (native_os == .windows) WindowsContext else void,
+
+ pub const WindowsContext = struct {
+ handle: File.Handle,
+ reset_attributes: u16,
+ };
pub fn setColor(conf: Config, out_stream: anytype, color: Color) !void {
nosuspend switch (conf) {
@@ -617,19 +629,16 @@ pub const TTY = struct {
};
try out_stream.writeAll(color_string);
},
- .windows_api => |file| if (native_os == .windows) {
- var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
- if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE)
- return error.FailedRetrievingTerminalInfo;
+ .windows_api => |ctx| if (native_os == .windows) {
const attributes = switch (color) {
.Red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY,
.Green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
.Cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
.White, .Bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
.Dim => windows.FOREGROUND_INTENSITY,
- .Reset => info.wAttributes,
+ .Reset => ctx.reset_attributes,
};
- try windows.SetConsoleTextAttribute(file.handle, attributes);
+ try windows.SetConsoleTextAttribute(ctx.handle, attributes);
} else {
unreachable;
},
@@ -1119,7 +1128,10 @@ fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void {
for (slice) |byte| {
if (line == line_info.line) {
- try out_stream.writeByte(byte);
+ switch (byte) {
+ '\t' => try out_stream.writeByte(' '),
+ else => try out_stream.writeByte(byte),
+ }
if (byte == '\n') {
return;
}
@@ -1999,13 +2011,18 @@ pub fn dumpStackPointerAddr(prefix: []const u8) void {
std.debug.print("{} sp = 0x{x}\n", .{ prefix, sp });
}
-test "#4353: std.debug should manage resources correctly" {
+test "manage resources correctly" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .x86_64) {
+ // https://github.com/ziglang/zig/issues/13963
+ return error.SkipZigTest;
+ }
+
const writer = std.io.null_writer;
var di = try openSelfDebugInfo(testing.allocator);
defer di.deinit();
- try printSourceAtAddress(&di, writer, showMyTrace(), detectTTYConfig());
+ try printSourceAtAddress(&di, writer, showMyTrace(), detectTTYConfig(std.io.getStdErr()));
}
noinline fn showMyTrace() usize {
@@ -2065,7 +2082,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
pub fn dump(t: @This()) void {
if (!enabled) return;
- const tty_config = detectTTYConfig();
+ const tty_config = detectTTYConfig(std.io.getStdErr());
const stderr = io.getStdErr().writer();
const end = @min(t.index, size);
const debug_info = getSelfDebugInfo() catch |err| {
diff --git a/lib/std/enums.zig b/lib/std/enums.zig
index d71bc96ad7..2640e6aac9 100644
--- a/lib/std/enums.zig
+++ b/lib/std/enums.zig
@@ -15,7 +15,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
for (std.meta.fields(E)) |field| {
fields = fields ++ &[_]StructField{.{
.name = field.name,
- .field_type = Data,
+ .type = Data,
.default_value = if (field_default) |d| @ptrCast(?*const anyopaque, &d) else null,
.is_comptime = false,
.alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0,
@@ -878,7 +878,7 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn (type) type) type {
/// index order. Modifications to the set during iteration
/// may or may not be observed by the iterator, but will
/// not invalidate it.
- pub fn iterator(self: *Self) Iterator {
+ pub fn iterator(self: *const Self) Iterator {
return .{ .inner = self.bits.iterator(.{}) };
}
@@ -970,6 +970,24 @@ test "pure EnumSet fns" {
try testing.expect(full.differenceWith(black).eql(red));
}
+test "std.enums.EnumSet const iterator" {
+ const Direction = enum { up, down, left, right };
+ const diag_move = init: {
+ var move = EnumSet(Direction).initEmpty();
+ move.insert(.right);
+ move.insert(.up);
+ break :init move;
+ };
+
+ var result = EnumSet(Direction).initEmpty();
+ var it = diag_move.iterator();
+ while (it.next()) |dir| {
+ result.insert(dir);
+ }
+
+ try testing.expect(result.eql(diag_move));
+}
+
/// A map from keys to values, using an index lookup. Uses a
/// bitfield to track presence and a dense array of values.
/// This type does no allocation and can be copied by value.
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 8c65ab6fd2..94b25c79a1 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -1724,7 +1724,7 @@ pub const ParseIntError = error{
/// ) !void;
///
pub fn Formatter(comptime format_fn: anytype) type {
- const Data = @typeInfo(@TypeOf(format_fn)).Fn.args[0].arg_type.?;
+ const Data = @typeInfo(@TypeOf(format_fn)).Fn.params[0].type.?;
return struct {
data: Data,
pub fn format(
@@ -2773,3 +2773,21 @@ test "runtime precision specifier" {
try expectFmt("3.14e+00", "{:1.[1]}", .{ number, precision });
try expectFmt("3.14e+00", "{:1.[precision]}", .{ .number = number, .precision = precision });
}
+
+test "recursive format function" {
+ const R = union(enum) {
+ const R = @This();
+ Leaf: i32,
+ Branch: struct { left: *const R, right: *const R },
+
+ pub fn format(self: R, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
+ return switch (self) {
+ .Leaf => |n| std.fmt.format(writer, "Leaf({})", .{n}),
+ .Branch => |b| std.fmt.format(writer, "Branch({}, {})", .{ b.left, b.right }),
+ };
+ }
+ };
+
+ var r = R{ .Leaf = 1 };
+ try expectFmt("Leaf(1)\n", "{}\n", .{&r});
+}
diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig
index b84b222721..427ac727c9 100644
--- a/lib/std/fmt/parse_float.zig
+++ b/lib/std/fmt/parse_float.zig
@@ -70,6 +70,11 @@ test "fmt.parseFloat" {
}
test "fmt.parseFloat nan and inf" {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/12027
+ return error.SkipZigTest;
+ }
+
inline for ([_]type{ f16, f32, f64, f128 }) |T| {
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index d2df596b3e..3f0bd9d645 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -34,7 +34,7 @@ pub const Watch = @import("fs/watch.zig").Watch;
/// fit into a UTF-8 encoded array of this length.
/// The byte count includes room for a null sentinel byte.
pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
- .linux, .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .haiku, .solaris => os.PATH_MAX,
+ .linux, .macos, .ios, .freebsd, .openbsd, .netbsd, .dragonfly, .haiku, .solaris => os.PATH_MAX,
// Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
// If it would require 4 UTF-8 bytes, then there would be a surrogate
// pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
@@ -54,10 +54,10 @@ pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
/// (depending on the platform) this assumption may not hold for every configuration.
/// The byte count does not include a null sentinel byte.
pub const MAX_NAME_BYTES = switch (builtin.os.tag) {
- .linux, .macos, .ios, .freebsd, .dragonfly => os.NAME_MAX,
+ .linux, .macos, .ios, .freebsd, .openbsd, .netbsd, .dragonfly => os.NAME_MAX,
// Haiku's NAME_MAX includes the null terminator, so subtract one.
.haiku => os.NAME_MAX - 1,
- .netbsd, .openbsd, .solaris => os.MAXNAMLEN,
+ .solaris => os.system.MAXNAMLEN,
// Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
// If it would require 4 UTF-8 bytes, then there would be a surrogate
// pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
@@ -1794,6 +1794,9 @@ pub const Dir = struct {
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
.NOT_A_DIRECTORY => return error.NotDir,
+ // This can happen if the directory has 'List folder contents' permission set to 'Deny'
+ // and the directory is trying to be opened for iteration.
+ .ACCESS_DENIED => return error.AccessDenied,
.INVALID_PARAMETER => unreachable,
else => return w.unexpectedStatus(rc),
}
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index 91244e34bd..3d6ed27538 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -460,7 +460,7 @@ pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 {
}
/// This function is like a series of `cd` statements executed one after another.
-/// It resolves "." and "..".
+/// It resolves "." and "..", but will not convert relative path to absolute path, use std.fs.Dir.realpath instead.
/// The result does not have a trailing path separator.
/// Each drive has its own current working directory.
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
@@ -637,7 +637,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
}
/// This function is like a series of `cd` statements executed one after another.
-/// It resolves "." and "..".
+/// It resolves "." and "..", but will not convert relative path to absolute path, use std.fs.Dir.realpath instead.
/// The result does not have a trailing path separator.
/// This function does not perform any syscalls. Executing this series of path
/// lookups on the actual filesystem may produce different results due to
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index c497e213bf..16458d7dc4 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -446,8 +446,8 @@ test "file operations on directories" {
try testing.expectError(error.IsDir, tmp_dir.dir.createFile(test_dir_name, .{}));
try testing.expectError(error.IsDir, tmp_dir.dir.deleteFile(test_dir_name));
switch (builtin.os.tag) {
- // NetBSD does not error when reading a directory.
- .netbsd => {},
+ // no error when reading a directory.
+ .dragonfly, .netbsd => {},
// Currently, WASI will return error.Unexpected (via ENOTCAPABLE) when attempting fd_read on a directory handle.
// TODO: Re-enable on WASI once https://github.com/bytecodealliance/wasmtime/issues/1935 is resolved.
.wasi => {},
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index aac4b0feaf..d4640262f2 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -134,7 +134,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
hash(hasher, tag, strat);
inline for (info.fields) |field| {
if (@field(tag_type, field.name) == tag) {
- if (field.field_type != void) {
+ if (field.type != void) {
hash(hasher, @field(key, field.name), strat);
}
// TODO use a labelled break when it does not crash the compiler. cf #2908
@@ -163,14 +163,14 @@ fn typeContainsSlice(comptime K: type) bool {
}
if (meta.trait.is(.Struct)(K)) {
inline for (@typeInfo(K).Struct.fields) |field| {
- if (typeContainsSlice(field.field_type)) {
+ if (typeContainsSlice(field.type)) {
return true;
}
}
}
if (meta.trait.is(.Union)(K)) {
inline for (@typeInfo(K).Union.fields) |field| {
- if (typeContainsSlice(field.field_type)) {
+ if (typeContainsSlice(field.type)) {
return true;
}
}
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 7f9783f91d..05205e6f07 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -186,11 +186,11 @@ pub fn verifyContext(
const info = @typeInfo(@TypeOf(hash));
if (info == .Fn) {
const func = info.Fn;
- if (func.args.len != 2) {
+ if (func.params.len != 2) {
errors = errors ++ lazy.err_invalid_hash_signature;
} else {
var emitted_signature = false;
- if (func.args[0].arg_type) |Self| {
+ if (func.params[0].type) |Self| {
if (Self == Context) {
// pass, this is always fine.
} else if (Self == *const Context) {
@@ -231,12 +231,12 @@ pub fn verifyContext(
errors = errors ++ ", but is " ++ @typeName(Self);
}
}
- if (func.args[1].arg_type != null and func.args[1].arg_type.? != PseudoKey) {
+ if (func.params[1].type != null and func.params[1].type.? != PseudoKey) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_hash_signature;
emitted_signature = true;
}
- errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?);
+ errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.params[1].type.?);
}
if (func.return_type != null and func.return_type.? != Hash) {
if (!emitted_signature) {
@@ -263,11 +263,11 @@ pub fn verifyContext(
if (info == .Fn) {
const func = info.Fn;
const args_len = if (is_array) 4 else 3;
- if (func.args.len != args_len) {
+ if (func.params.len != args_len) {
errors = errors ++ lazy.err_invalid_eql_signature;
} else {
var emitted_signature = false;
- if (func.args[0].arg_type) |Self| {
+ if (func.params[0].type) |Self| {
if (Self == Context) {
// pass, this is always fine.
} else if (Self == *const Context) {
@@ -308,19 +308,19 @@ pub fn verifyContext(
errors = errors ++ ", but is " ++ @typeName(Self);
}
}
- if (func.args[1].arg_type.? != PseudoKey) {
+ if (func.params[1].type.? != PseudoKey) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
- errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?);
+ errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.params[1].type.?);
}
- if (func.args[2].arg_type.? != Key) {
+ if (func.params[2].type.? != Key) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
- errors = errors ++ lazy.deep_prefix ++ "Third parameter must be " ++ @typeName(Key) ++ ", but is " ++ @typeName(func.args[2].arg_type.?);
+ errors = errors ++ lazy.deep_prefix ++ "Third parameter must be " ++ @typeName(Key) ++ ", but is " ++ @typeName(func.params[2].type.?);
}
if (func.return_type.? != bool) {
if (!emitted_signature) {
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index b662cd39bd..d8e88c4933 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -187,9 +187,13 @@ fn rawCAlloc(
) ?[*]u8 {
_ = ret_addr;
assert(log2_ptr_align <= comptime std.math.log2_int(usize, @alignOf(std.c.max_align_t)));
- // TODO: change the language to make @ptrCast also do alignment cast
- const ptr = @alignCast(@alignOf(std.c.max_align_t), c.malloc(len));
- return @ptrCast(?[*]align(@alignOf(std.c.max_align_t)) u8, ptr);
+ // Note that this pointer cannot be aligncasted to max_align_t because if
+ // len is < max_align_t then the alignment can be smaller. For example, if
+ // max_align_t is 16, but the user requests 8 bytes, there is no built-in
+ // type in C that is size 8 and has 16 byte alignment, so the alignment may
+ // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc
+ // is allowed to return a 1-byte aligned pointer.
+ return @ptrCast(?[*]u8, c.malloc(len));
}
fn rawCResize(
@@ -795,7 +799,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
const large_align: usize = mem.page_size / 2;
var align_mask: usize = undefined;
- _ = @shlWithOverflow(usize, ~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)), &align_mask);
+ align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];
var slice = try allocator.alignedAlloc(u8, large_align, 500);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
diff --git a/lib/std/http.zig b/lib/std/http.zig
index 8da6968403..944271df27 100644
--- a/lib/std/http.zig
+++ b/lib/std/http.zig
@@ -1,8 +1,301 @@
-const std = @import("std.zig");
+pub const Client = @import("http/Client.zig");
+
+/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
+/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
+/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
+pub const Method = enum {
+ GET,
+ HEAD,
+ POST,
+ PUT,
+ DELETE,
+ CONNECT,
+ OPTIONS,
+ TRACE,
+ PATCH,
+
+ /// Returns true if a request of this method is allowed to have a body
+ /// Actual behavior from servers may vary and should still be checked
+ pub fn requestHasBody(self: Method) bool {
+ return switch (self) {
+ .POST, .PUT, .PATCH => true,
+ .GET, .HEAD, .DELETE, .CONNECT, .OPTIONS, .TRACE => false,
+ };
+ }
+
+ /// Returns true if a response to this method is allowed to have a body
+ /// Actual behavior from clients may vary and should still be checked
+ pub fn responseHasBody(self: Method) bool {
+ return switch (self) {
+ .GET, .POST, .DELETE, .CONNECT, .OPTIONS, .PATCH => true,
+ .HEAD, .PUT, .TRACE => false,
+ };
+ }
+
+ /// An HTTP method is safe if it doesn't alter the state of the server.
+ /// https://developer.mozilla.org/en-US/docs/Glossary/Safe/HTTP
+ /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.1
+ pub fn safe(self: Method) bool {
+ return switch (self) {
+ .GET, .HEAD, .OPTIONS, .TRACE => true,
+ .POST, .PUT, .DELETE, .CONNECT, .PATCH => false,
+ };
+ }
+
+ /// An HTTP method is idempotent if an identical request can be made once or several times in a row with the same effect while leaving the server in the same state.
+ /// https://developer.mozilla.org/en-US/docs/Glossary/Idempotent
+ /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.2
+ pub fn idempotent(self: Method) bool {
+ return switch (self) {
+ .GET, .HEAD, .PUT, .DELETE, .OPTIONS, .TRACE => true,
+ .CONNECT, .POST, .PATCH => false,
+ };
+ }
+
+ /// A cacheable response is an HTTP response that can be cached, that is stored to be retrieved and used later, saving a new request to the server.
+ /// https://developer.mozilla.org/en-US/docs/Glossary/cacheable
+ /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.3
+ pub fn cacheable(self: Method) bool {
+ return switch (self) {
+ .GET, .HEAD => true,
+ .POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .TRACE, .PATCH => false,
+ };
+ }
+};
+
+/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
+pub const Status = enum(u10) {
+ @"continue" = 100, // RFC7231, Section 6.2.1
+ switching_protocols = 101, // RFC7231, Section 6.2.2
+ processing = 102, // RFC2518
+ early_hints = 103, // RFC8297
+
+ ok = 200, // RFC7231, Section 6.3.1
+ created = 201, // RFC7231, Section 6.3.2
+ accepted = 202, // RFC7231, Section 6.3.3
+ non_authoritative_info = 203, // RFC7231, Section 6.3.4
+ no_content = 204, // RFC7231, Section 6.3.5
+ reset_content = 205, // RFC7231, Section 6.3.6
+ partial_content = 206, // RFC7233, Section 4.1
+ multi_status = 207, // RFC4918
+ already_reported = 208, // RFC5842
+ im_used = 226, // RFC3229
+
+ multiple_choice = 300, // RFC7231, Section 6.4.1
+ moved_permanently = 301, // RFC7231, Section 6.4.2
+ found = 302, // RFC7231, Section 6.4.3
+ see_other = 303, // RFC7231, Section 6.4.4
+ not_modified = 304, // RFC7232, Section 4.1
+ use_proxy = 305, // RFC7231, Section 6.4.5
+ temporary_redirect = 307, // RFC7231, Section 6.4.7
+ permanent_redirect = 308, // RFC7538
+
+ bad_request = 400, // RFC7231, Section 6.5.1
+ unauthorized = 401, // RFC7235, Section 3.1
+ payment_required = 402, // RFC7231, Section 6.5.2
+ forbidden = 403, // RFC7231, Section 6.5.3
+ not_found = 404, // RFC7231, Section 6.5.4
+ method_not_allowed = 405, // RFC7231, Section 6.5.5
+ not_acceptable = 406, // RFC7231, Section 6.5.6
+ proxy_auth_required = 407, // RFC7235, Section 3.2
+ request_timeout = 408, // RFC7231, Section 6.5.7
+ conflict = 409, // RFC7231, Section 6.5.8
+ gone = 410, // RFC7231, Section 6.5.9
+ length_required = 411, // RFC7231, Section 6.5.10
+ precondition_failed = 412, // RFC7232, Section 4.2; RFC8144, Section 3.2
+ payload_too_large = 413, // RFC7231, Section 6.5.11
+ uri_too_long = 414, // RFC7231, Section 6.5.12
+ unsupported_media_type = 415, // RFC7231, Section 6.5.13; RFC7694, Section 3
+ range_not_satisfiable = 416, // RFC7233, Section 4.4
+ expectation_failed = 417, // RFC7231, Section 6.5.14
+ teapot = 418, // RFC 7168, 2.3.3
+ misdirected_request = 421, // RFC7540, Section 9.1.2
+ unprocessable_entity = 422, // RFC4918
+ locked = 423, // RFC4918
+ failed_dependency = 424, // RFC4918
+ too_early = 425, // RFC8470
+ upgrade_required = 426, // RFC7231, Section 6.5.15
+ precondition_required = 428, // RFC6585
+ too_many_requests = 429, // RFC6585
+ header_fields_too_large = 431, // RFC6585
+ unavailable_for_legal_reasons = 451, // RFC7725
+
+ internal_server_error = 500, // RFC7231, Section 6.6.1
+ not_implemented = 501, // RFC7231, Section 6.6.2
+ bad_gateway = 502, // RFC7231, Section 6.6.3
+ service_unavailable = 503, // RFC7231, Section 6.6.4
+ gateway_timeout = 504, // RFC7231, Section 6.6.5
+ http_version_not_supported = 505, // RFC7231, Section 6.6.6
+ variant_also_negotiates = 506, // RFC2295
+ insufficient_storage = 507, // RFC4918
+ loop_detected = 508, // RFC5842
+ not_extended = 510, // RFC2774
+ network_authentication_required = 511, // RFC6585
+
+ _,
+
+ pub fn phrase(self: Status) ?[]const u8 {
+ return switch (self) {
+ // 1xx statuses
+ .@"continue" => "Continue",
+ .switching_protocols => "Switching Protocols",
+ .processing => "Processing",
+ .early_hints => "Early Hints",
-pub const Method = @import("http/method.zig").Method;
-pub const Status = @import("http/status.zig").Status;
+ // 2xx statuses
+ .ok => "OK",
+ .created => "Created",
+ .accepted => "Accepted",
+ .non_authoritative_info => "Non-Authoritative Information",
+ .no_content => "No Content",
+ .reset_content => "Reset Content",
+ .partial_content => "Partial Content",
+ .multi_status => "Multi-Status",
+ .already_reported => "Already Reported",
+ .im_used => "IM Used",
+
+ // 3xx statuses
+ .multiple_choice => "Multiple Choice",
+ .moved_permanently => "Moved Permanently",
+ .found => "Found",
+ .see_other => "See Other",
+ .not_modified => "Not Modified",
+ .use_proxy => "Use Proxy",
+ .temporary_redirect => "Temporary Redirect",
+ .permanent_redirect => "Permanent Redirect",
+
+ // 4xx statuses
+ .bad_request => "Bad Request",
+ .unauthorized => "Unauthorized",
+ .payment_required => "Payment Required",
+ .forbidden => "Forbidden",
+ .not_found => "Not Found",
+ .method_not_allowed => "Method Not Allowed",
+ .not_acceptable => "Not Acceptable",
+ .proxy_auth_required => "Proxy Authentication Required",
+ .request_timeout => "Request Timeout",
+ .conflict => "Conflict",
+ .gone => "Gone",
+ .length_required => "Length Required",
+ .precondition_failed => "Precondition Failed",
+ .payload_too_large => "Payload Too Large",
+ .uri_too_long => "URI Too Long",
+ .unsupported_media_type => "Unsupported Media Type",
+ .range_not_satisfiable => "Range Not Satisfiable",
+ .expectation_failed => "Expectation Failed",
+ .teapot => "I'm a teapot",
+ .misdirected_request => "Misdirected Request",
+ .unprocessable_entity => "Unprocessable Entity",
+ .locked => "Locked",
+ .failed_dependency => "Failed Dependency",
+ .too_early => "Too Early",
+ .upgrade_required => "Upgrade Required",
+ .precondition_required => "Precondition Required",
+ .too_many_requests => "Too Many Requests",
+ .header_fields_too_large => "Request Header Fields Too Large",
+ .unavailable_for_legal_reasons => "Unavailable For Legal Reasons",
+
+ // 5xx statuses
+ .internal_server_error => "Internal Server Error",
+ .not_implemented => "Not Implemented",
+ .bad_gateway => "Bad Gateway",
+ .service_unavailable => "Service Unavailable",
+ .gateway_timeout => "Gateway Timeout",
+ .http_version_not_supported => "HTTP Version Not Supported",
+ .variant_also_negotiates => "Variant Also Negotiates",
+ .insufficient_storage => "Insufficient Storage",
+ .loop_detected => "Loop Detected",
+ .not_extended => "Not Extended",
+ .network_authentication_required => "Network Authentication Required",
+
+ else => return null,
+ };
+ }
+
+ pub const Class = enum {
+ informational,
+ success,
+ redirect,
+ client_error,
+ server_error,
+ };
+
+ pub fn class(self: Status) ?Class {
+ return switch (@enumToInt(self)) {
+ 100...199 => .informational,
+ 200...299 => .success,
+ 300...399 => .redirect,
+ 400...499 => .client_error,
+ 500...599 => .server_error,
+ else => null,
+ };
+ }
+
+ test {
+ try std.testing.expectEqualStrings("OK", Status.ok.phrase().?);
+ try std.testing.expectEqualStrings("Not Found", Status.not_found.phrase().?);
+ }
+
+ test {
+ try std.testing.expectEqual(@as(?Status.Class, Status.Class.success), Status.ok.class());
+ try std.testing.expectEqual(@as(?Status.Class, Status.Class.client_error), Status.not_found.class());
+ }
+};
+
+pub const Headers = struct {
+ state: State = .start,
+ invalid_index: u32 = undefined,
+
+ pub const State = enum { invalid, start, line, nl_r, nl_n, nl2_r, finished };
+
+ /// Returns how many bytes are processed into headers. Always less than or
+ /// equal to bytes.len. If the amount returned is less than bytes.len, it
+ /// means the headers ended and the first byte after the double \r\n\r\n is
+ /// located at `bytes[result]`.
+ pub fn feed(h: *Headers, bytes: []const u8) usize {
+ for (bytes) |b, i| {
+ switch (h.state) {
+ .start => switch (b) {
+ '\r' => h.state = .nl_r,
+ '\n' => return invalid(h, i),
+ else => {},
+ },
+ .nl_r => switch (b) {
+ '\n' => h.state = .nl_n,
+ else => return invalid(h, i),
+ },
+ .nl_n => switch (b) {
+ '\r' => h.state = .nl2_r,
+ else => h.state = .line,
+ },
+ .nl2_r => switch (b) {
+ '\n' => h.state = .finished,
+ else => return invalid(h, i),
+ },
+ .line => switch (b) {
+ '\r' => h.state = .nl_r,
+ '\n' => return invalid(h, i),
+ else => {},
+ },
+ .invalid => return i,
+ .finished => return i,
+ }
+ }
+ return bytes.len;
+ }
+
+ fn invalid(h: *Headers, i: usize) usize {
+ h.invalid_index = @intCast(u32, i);
+ h.state = .invalid;
+ return i;
+ }
+};
+
+const std = @import("std.zig");
test {
- std.testing.refAllDecls(@This());
+ _ = Client;
+ _ = Method;
+ _ = Status;
+ _ = Headers;
}
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
new file mode 100644
index 0000000000..8a4a771416
--- /dev/null
+++ b/lib/std/http/Client.zig
@@ -0,0 +1,181 @@
+//! This API is a barely-touched, barely-functional http client, just the
+//! absolute minimum thing I needed in order to test `std.crypto.tls`. Bear
+//! with me and I promise the API will become useful and streamlined.
+
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const http = std.http;
+const net = std.net;
+const Client = @This();
+const Url = std.Url;
+
+allocator: std.mem.Allocator,
+headers: std.ArrayListUnmanaged(u8) = .{},
+active_requests: usize = 0,
+ca_bundle: std.crypto.Certificate.Bundle = .{},
+
+/// TODO: emit error.UnexpectedEndOfStream or something like that when the read
+/// data does not match the content length. This is necessary since HTTPS disables
+/// close_notify protection on underlying TLS streams.
+pub const Request = struct {
+ client: *Client,
+ stream: net.Stream,
+ headers: std.ArrayListUnmanaged(u8) = .{},
+ tls_client: std.crypto.tls.Client,
+ protocol: Protocol,
+ response_headers: http.Headers = .{},
+
+ pub const Protocol = enum { http, https };
+
+ pub const Options = struct {
+ method: http.Method = .GET,
+ };
+
+ pub fn deinit(req: *Request) void {
+ req.client.active_requests -= 1;
+ req.headers.deinit(req.client.allocator);
+ req.* = undefined;
+ }
+
+ pub fn addHeader(req: *Request, name: []const u8, value: []const u8) !void {
+ const gpa = req.client.allocator;
+ // Ensure an extra +2 for the \r\n in end()
+ try req.headers.ensureUnusedCapacity(gpa, name.len + value.len + 6);
+ req.headers.appendSliceAssumeCapacity(name);
+ req.headers.appendSliceAssumeCapacity(": ");
+ req.headers.appendSliceAssumeCapacity(value);
+ req.headers.appendSliceAssumeCapacity("\r\n");
+ }
+
+ pub fn end(req: *Request) !void {
+ req.headers.appendSliceAssumeCapacity("\r\n");
+ switch (req.protocol) {
+ .http => {
+ try req.stream.writeAll(req.headers.items);
+ },
+ .https => {
+ try req.tls_client.writeAll(req.stream, req.headers.items);
+ },
+ }
+ }
+
+ pub fn readAll(req: *Request, buffer: []u8) !usize {
+ return readAtLeast(req, buffer, buffer.len);
+ }
+
+ pub fn read(req: *Request, buffer: []u8) !usize {
+ return readAtLeast(req, buffer, 1);
+ }
+
+ pub fn readAtLeast(req: *Request, buffer: []u8, len: usize) !usize {
+ assert(len <= buffer.len);
+ var index: usize = 0;
+ while (index < len) {
+ const headers_finished = req.response_headers.state == .finished;
+ const amt = try readAdvanced(req, buffer[index..]);
+ if (amt == 0 and headers_finished) break;
+ index += amt;
+ }
+ return index;
+ }
+
+ /// This one can return 0 without meaning EOF.
+ /// TODO change to readvAdvanced
+ pub fn readAdvanced(req: *Request, buffer: []u8) !usize {
+ if (req.response_headers.state == .finished) return readRaw(req, buffer);
+
+ const amt = try readRaw(req, buffer);
+ const data = buffer[0..amt];
+ const i = req.response_headers.feed(data);
+ if (req.response_headers.state == .invalid) return error.InvalidHttpHeaders;
+ if (i < data.len) {
+ const rest = data[i..];
+ std.mem.copy(u8, buffer, rest);
+ return rest.len;
+ }
+ return 0;
+ }
+
+ /// Only abstracts over http/https.
+ fn readRaw(req: *Request, buffer: []u8) !usize {
+ switch (req.protocol) {
+ .http => return req.stream.read(buffer),
+ .https => return req.tls_client.read(req.stream, buffer),
+ }
+ }
+
+ /// Only abstracts over http/https.
+ fn readAtLeastRaw(req: *Request, buffer: []u8, len: usize) !usize {
+ switch (req.protocol) {
+ .http => return req.stream.readAtLeast(buffer, len),
+ .https => return req.tls_client.readAtLeast(req.stream, buffer, len),
+ }
+ }
+};
+
+pub fn deinit(client: *Client) void {
+ assert(client.active_requests == 0);
+ client.headers.deinit(client.allocator);
+ client.* = undefined;
+}
+
+pub fn request(client: *Client, url: Url, options: Request.Options) !Request {
+ const protocol = std.meta.stringToEnum(Request.Protocol, url.scheme) orelse
+ return error.UnsupportedUrlScheme;
+ const port: u16 = url.port orelse switch (protocol) {
+ .http => 80,
+ .https => 443,
+ };
+
+ var req: Request = .{
+ .client = client,
+ .stream = try net.tcpConnectToHost(client.allocator, url.host, port),
+ .protocol = protocol,
+ .tls_client = undefined,
+ };
+ client.active_requests += 1;
+ errdefer req.deinit();
+
+ switch (protocol) {
+ .http => {},
+ .https => {
+ req.tls_client = try std.crypto.tls.Client.init(req.stream, client.ca_bundle, url.host);
+ // This is appropriate for HTTPS because the HTTP headers contain
+ // the content length which is used to detect truncation attacks.
+ req.tls_client.allow_truncation_attacks = true;
+ },
+ }
+
+ try req.headers.ensureUnusedCapacity(
+ client.allocator,
+ @tagName(options.method).len +
+ 1 +
+ url.path.len +
+ " HTTP/1.1\r\nHost: ".len +
+ url.host.len +
+ "\r\nUpgrade-Insecure-Requests: 1\r\n".len +
+ client.headers.items.len +
+ 2, // for the \r\n at the end of headers
+ );
+ req.headers.appendSliceAssumeCapacity(@tagName(options.method));
+ req.headers.appendSliceAssumeCapacity(" ");
+ req.headers.appendSliceAssumeCapacity(url.path);
+ req.headers.appendSliceAssumeCapacity(" HTTP/1.1\r\nHost: ");
+ req.headers.appendSliceAssumeCapacity(url.host);
+ switch (protocol) {
+ .https => req.headers.appendSliceAssumeCapacity("\r\nUpgrade-Insecure-Requests: 1\r\n"),
+ .http => req.headers.appendSliceAssumeCapacity("\r\n"),
+ }
+ req.headers.appendSliceAssumeCapacity(client.headers.items);
+
+ return req;
+}
+
+pub fn addHeader(client: *Client, name: []const u8, value: []const u8) !void {
+ const gpa = client.allocator;
+ try client.headers.ensureUnusedCapacity(gpa, name.len + value.len + 4);
+ client.headers.appendSliceAssumeCapacity(name);
+ client.headers.appendSliceAssumeCapacity(": ");
+ client.headers.appendSliceAssumeCapacity(value);
+ client.headers.appendSliceAssumeCapacity("\r\n");
+}
diff --git a/lib/std/http/method.zig b/lib/std/http/method.zig
deleted file mode 100644
index c118ca9a47..0000000000
--- a/lib/std/http/method.zig
+++ /dev/null
@@ -1,65 +0,0 @@
-//! HTTP Methods
-//! https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
-
-// Style guide is violated here so that @tagName can be used effectively
-/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definiton
-/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
-pub const Method = enum {
- GET,
- HEAD,
- POST,
- PUT,
- DELETE,
- CONNECT,
- OPTIONS,
- TRACE,
- PATCH,
-
- /// Returns true if a request of this method is allowed to have a body
- /// Actual behavior from servers may vary and should still be checked
- pub fn requestHasBody(self: Method) bool {
- return switch (self) {
- .POST, .PUT, .PATCH => true,
- .GET, .HEAD, .DELETE, .CONNECT, .OPTIONS, .TRACE => false,
- };
- }
-
- /// Returns true if a response to this method is allowed to have a body
- /// Actual behavior from clients may vary and should still be checked
- pub fn responseHasBody(self: Method) bool {
- return switch (self) {
- .GET, .POST, .DELETE, .CONNECT, .OPTIONS, .PATCH => true,
- .HEAD, .PUT, .TRACE => false,
- };
- }
-
- /// An HTTP method is safe if it doesn't alter the state of the server.
- /// https://developer.mozilla.org/en-US/docs/Glossary/Safe/HTTP
- /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.1
- pub fn safe(self: Method) bool {
- return switch (self) {
- .GET, .HEAD, .OPTIONS, .TRACE => true,
- .POST, .PUT, .DELETE, .CONNECT, .PATCH => false,
- };
- }
-
- /// An HTTP method is idempotent if an identical request can be made once or several times in a row with the same effect while leaving the server in the same state.
- /// https://developer.mozilla.org/en-US/docs/Glossary/Idempotent
- /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.2
- pub fn idempotent(self: Method) bool {
- return switch (self) {
- .GET, .HEAD, .PUT, .DELETE, .OPTIONS, .TRACE => true,
- .CONNECT, .POST, .PATCH => false,
- };
- }
-
- /// A cacheable response is an HTTP response that can be cached, that is stored to be retrieved and used later, saving a new request to the server.
- /// https://developer.mozilla.org/en-US/docs/Glossary/cacheable
- /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.3
- pub fn cacheable(self: Method) bool {
- return switch (self) {
- .GET, .HEAD => true,
- .POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .TRACE, .PATCH => false,
- };
- }
-};
diff --git a/lib/std/http/status.zig b/lib/std/http/status.zig
deleted file mode 100644
index 91738e0533..0000000000
--- a/lib/std/http/status.zig
+++ /dev/null
@@ -1,182 +0,0 @@
-//! HTTP Status
-//! https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
-
-const std = @import("../std.zig");
-
-pub const Status = enum(u10) {
- @"continue" = 100, // RFC7231, Section 6.2.1
- switching_protocols = 101, // RFC7231, Section 6.2.2
- processing = 102, // RFC2518
- early_hints = 103, // RFC8297
-
- ok = 200, // RFC7231, Section 6.3.1
- created = 201, // RFC7231, Section 6.3.2
- accepted = 202, // RFC7231, Section 6.3.3
- non_authoritative_info = 203, // RFC7231, Section 6.3.4
- no_content = 204, // RFC7231, Section 6.3.5
- reset_content = 205, // RFC7231, Section 6.3.6
- partial_content = 206, // RFC7233, Section 4.1
- multi_status = 207, // RFC4918
- already_reported = 208, // RFC5842
- im_used = 226, // RFC3229
-
- multiple_choice = 300, // RFC7231, Section 6.4.1
- moved_permanently = 301, // RFC7231, Section 6.4.2
- found = 302, // RFC7231, Section 6.4.3
- see_other = 303, // RFC7231, Section 6.4.4
- not_modified = 304, // RFC7232, Section 4.1
- use_proxy = 305, // RFC7231, Section 6.4.5
- temporary_redirect = 307, // RFC7231, Section 6.4.7
- permanent_redirect = 308, // RFC7538
-
- bad_request = 400, // RFC7231, Section 6.5.1
- unauthorized = 401, // RFC7235, Section 3.1
- payment_required = 402, // RFC7231, Section 6.5.2
- forbidden = 403, // RFC7231, Section 6.5.3
- not_found = 404, // RFC7231, Section 6.5.4
- method_not_allowed = 405, // RFC7231, Section 6.5.5
- not_acceptable = 406, // RFC7231, Section 6.5.6
- proxy_auth_required = 407, // RFC7235, Section 3.2
- request_timeout = 408, // RFC7231, Section 6.5.7
- conflict = 409, // RFC7231, Section 6.5.8
- gone = 410, // RFC7231, Section 6.5.9
- length_required = 411, // RFC7231, Section 6.5.10
- precondition_failed = 412, // RFC7232, Section 4.2][RFC8144, Section 3.2
- payload_too_large = 413, // RFC7231, Section 6.5.11
- uri_too_long = 414, // RFC7231, Section 6.5.12
- unsupported_media_type = 415, // RFC7231, Section 6.5.13][RFC7694, Section 3
- range_not_satisfiable = 416, // RFC7233, Section 4.4
- expectation_failed = 417, // RFC7231, Section 6.5.14
- teapot = 418, // RFC 7168, 2.3.3
- misdirected_request = 421, // RFC7540, Section 9.1.2
- unprocessable_entity = 422, // RFC4918
- locked = 423, // RFC4918
- failed_dependency = 424, // RFC4918
- too_early = 425, // RFC8470
- upgrade_required = 426, // RFC7231, Section 6.5.15
- precondition_required = 428, // RFC6585
- too_many_requests = 429, // RFC6585
- header_fields_too_large = 431, // RFC6585
- unavailable_for_legal_reasons = 451, // RFC7725
-
- internal_server_error = 500, // RFC7231, Section 6.6.1
- not_implemented = 501, // RFC7231, Section 6.6.2
- bad_gateway = 502, // RFC7231, Section 6.6.3
- service_unavailable = 503, // RFC7231, Section 6.6.4
- gateway_timeout = 504, // RFC7231, Section 6.6.5
- http_version_not_supported = 505, // RFC7231, Section 6.6.6
- variant_also_negotiates = 506, // RFC2295
- insufficient_storage = 507, // RFC4918
- loop_detected = 508, // RFC5842
- not_extended = 510, // RFC2774
- network_authentication_required = 511, // RFC6585
-
- _,
-
- pub fn phrase(self: Status) ?[]const u8 {
- return switch (self) {
- // 1xx statuses
- .@"continue" => "Continue",
- .switching_protocols => "Switching Protocols",
- .processing => "Processing",
- .early_hints => "Early Hints",
-
- // 2xx statuses
- .ok => "OK",
- .created => "Created",
- .accepted => "Accepted",
- .non_authoritative_info => "Non-Authoritative Information",
- .no_content => "No Content",
- .reset_content => "Reset Content",
- .partial_content => "Partial Content",
- .multi_status => "Multi-Status",
- .already_reported => "Already Reported",
- .im_used => "IM Used",
-
- // 3xx statuses
- .multiple_choice => "Multiple Choice",
- .moved_permanently => "Moved Permanently",
- .found => "Found",
- .see_other => "See Other",
- .not_modified => "Not Modified",
- .use_proxy => "Use Proxy",
- .temporary_redirect => "Temporary Redirect",
- .permanent_redirect => "Permanent Redirect",
-
- // 4xx statuses
- .bad_request => "Bad Request",
- .unauthorized => "Unauthorized",
- .payment_required => "Payment Required",
- .forbidden => "Forbidden",
- .not_found => "Not Found",
- .method_not_allowed => "Method Not Allowed",
- .not_acceptable => "Not Acceptable",
- .proxy_auth_required => "Proxy Authentication Required",
- .request_timeout => "Request Timeout",
- .conflict => "Conflict",
- .gone => "Gone",
- .length_required => "Length Required",
- .precondition_failed => "Precondition Failed",
- .payload_too_large => "Payload Too Large",
- .uri_too_long => "URI Too Long",
- .unsupported_media_type => "Unsupported Media Type",
- .range_not_satisfiable => "Range Not Satisfiable",
- .expectation_failed => "Expectation Failed",
- .teapot => "I'm a teapot",
- .misdirected_request => "Misdirected Request",
- .unprocessable_entity => "Unprocessable Entity",
- .locked => "Locked",
- .failed_dependency => "Failed Dependency",
- .too_early => "Too Early",
- .upgrade_required => "Upgrade Required",
- .precondition_required => "Precondition Required",
- .too_many_requests => "Too Many Requests",
- .header_fields_too_large => "Request Header Fields Too Large",
- .unavailable_for_legal_reasons => "Unavailable For Legal Reasons",
-
- // 5xx statuses
- .internal_server_error => "Internal Server Error",
- .not_implemented => "Not Implemented",
- .bad_gateway => "Bad Gateway",
- .service_unavailable => "Service Unavailable",
- .gateway_timeout => "Gateway Timeout",
- .http_version_not_supported => "HTTP Version Not Supported",
- .variant_also_negotiates => "Variant Also Negotiates",
- .insufficient_storage => "Insufficient Storage",
- .loop_detected => "Loop Detected",
- .not_extended => "Not Extended",
- .network_authentication_required => "Network Authentication Required",
-
- else => return null,
- };
- }
-
- pub const Class = enum {
- informational,
- success,
- redirect,
- client_error,
- server_error,
- };
-
- pub fn class(self: Status) ?Class {
- return switch (@enumToInt(self)) {
- 100...199 => .informational,
- 200...299 => .success,
- 300...399 => .redirect,
- 400...499 => .client_error,
- 500...599 => .server_error,
- else => null,
- };
- }
-};
-
-test {
- try std.testing.expectEqualStrings("OK", Status.ok.phrase().?);
- try std.testing.expectEqualStrings("Not Found", Status.not_found.phrase().?);
-}
-
-test {
- try std.testing.expectEqual(@as(?Status.Class, Status.Class.success), Status.ok.class());
- try std.testing.expectEqual(@as(?Status.Class, Status.Class.client_error), Status.not_found.class());
-}
diff --git a/lib/std/io/buffered_reader.zig b/lib/std/io/buffered_reader.zig
index b803e37602..aca665fb30 100644
--- a/lib/std/io/buffered_reader.zig
+++ b/lib/std/io/buffered_reader.zig
@@ -1,35 +1,39 @@
const std = @import("../std.zig");
const io = std.io;
+const mem = std.mem;
const assert = std.debug.assert;
const testing = std.testing;
pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) type {
return struct {
unbuffered_reader: ReaderType,
- fifo: FifoType = FifoType.init(),
+ buf: [buffer_size]u8 = undefined,
+ start: usize = 0,
+ end: usize = 0,
pub const Error = ReaderType.Error;
pub const Reader = io.Reader(*Self, Error, read);
const Self = @This();
- const FifoType = std.fifo.LinearFifo(u8, std.fifo.LinearFifoBufferType{ .Static = buffer_size });
pub fn read(self: *Self, dest: []u8) Error!usize {
var dest_index: usize = 0;
+
while (dest_index < dest.len) {
- const written = self.fifo.read(dest[dest_index..]);
+ const written = std.math.min(dest.len - dest_index, self.end - self.start);
+ std.mem.copy(u8, dest[dest_index..], self.buf[self.start .. self.start + written]);
if (written == 0) {
- // fifo empty, fill it
- const writable = self.fifo.writableSlice(0);
- assert(writable.len > 0);
- const n = try self.unbuffered_reader.read(writable);
+ // buf empty, fill it
+ const n = try self.unbuffered_reader.read(self.buf[0..]);
if (n == 0) {
// reading from the unbuffered stream returned nothing
// so we have nothing left to read.
return dest_index;
}
- self.fifo.update(n);
+ self.start = 0;
+ self.end = n;
}
+ self.start += written;
dest_index += written;
}
return dest.len;
@@ -45,7 +49,7 @@ pub fn bufferedReader(underlying_stream: anytype) BufferedReader(4096, @TypeOf(u
return .{ .unbuffered_reader = underlying_stream };
}
-test "io.BufferedReader" {
+test "io.BufferedReader OneByte" {
const OneByteReadReader = struct {
str: []const u8,
curr: usize,
@@ -84,3 +88,102 @@ test "io.BufferedReader" {
defer testing.allocator.free(res);
try testing.expectEqualSlices(u8, str, res);
}
+
+fn smallBufferedReader(underlying_stream: anytype) BufferedReader(8, @TypeOf(underlying_stream)) {
+ return .{ .unbuffered_reader = underlying_stream };
+}
+test "io.BufferedReader Block" {
+ const BlockReader = struct {
+ block: []const u8,
+ reads_allowed: usize,
+ curr_read: usize,
+
+ const Error = error{NoError};
+ const Self = @This();
+ const Reader = io.Reader(*Self, Error, read);
+
+ fn init(block: []const u8, reads_allowed: usize) Self {
+ return Self{
+ .block = block,
+ .reads_allowed = reads_allowed,
+ .curr_read = 0,
+ };
+ }
+
+ fn read(self: *Self, dest: []u8) Error!usize {
+ if (self.curr_read >= self.reads_allowed) {
+ return 0;
+ }
+ std.debug.assert(dest.len >= self.block.len);
+ std.mem.copy(u8, dest, self.block);
+
+ self.curr_read += 1;
+ return self.block.len;
+ }
+
+ fn reader(self: *Self) Reader {
+ return .{ .context = self };
+ }
+ };
+
+ const block = "0123";
+
+ // len out == block
+ {
+ var block_reader = BlockReader.init(block, 2);
+ var test_buf_reader = BufferedReader(4, BlockReader){ .unbuffered_reader = block_reader };
+ var out_buf: [4]u8 = undefined;
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, block);
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, block);
+ try testing.expectEqual(try test_buf_reader.read(&out_buf), 0);
+ }
+
+ // len out < block
+ {
+ var block_reader = BlockReader.init(block, 2);
+ var test_buf_reader = BufferedReader(4, BlockReader){ .unbuffered_reader = block_reader };
+ var out_buf: [3]u8 = undefined;
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, "012");
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, "301");
+ const n = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, out_buf[0..n], "23");
+ try testing.expectEqual(try test_buf_reader.read(&out_buf), 0);
+ }
+
+ // len out > block
+ {
+ var block_reader = BlockReader.init(block, 2);
+ var test_buf_reader = BufferedReader(4, BlockReader){ .unbuffered_reader = block_reader };
+ var out_buf: [5]u8 = undefined;
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, "01230");
+ const n = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, out_buf[0..n], "123");
+ try testing.expectEqual(try test_buf_reader.read(&out_buf), 0);
+ }
+
+ // len out == 0
+ {
+ var block_reader = BlockReader.init(block, 2);
+ var test_buf_reader = BufferedReader(4, BlockReader){ .unbuffered_reader = block_reader };
+ var out_buf: [0]u8 = undefined;
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, "");
+ }
+
+ // len bufreader buf > block
+ {
+ var block_reader = BlockReader.init(block, 2);
+ var test_buf_reader = BufferedReader(5, BlockReader){ .unbuffered_reader = block_reader };
+ var out_buf: [4]u8 = undefined;
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, block);
+ _ = try test_buf_reader.read(&out_buf);
+ try testing.expectEqualSlices(u8, &out_buf, block);
+ try testing.expectEqual(try test_buf_reader.read(&out_buf), 0);
+ }
+}
diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig
index b002bb47b8..f486356491 100644
--- a/lib/std/io/fixed_buffer_stream.zig
+++ b/lib/std/io/fixed_buffer_stream.zig
@@ -132,6 +132,17 @@ test "FixedBufferStream output" {
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
}
+test "FixedBufferStream output at comptime" {
+ comptime {
+ var buf: [255]u8 = undefined;
+ var fbs = fixedBufferStream(&buf);
+ const stream = fbs.writer();
+
+ try stream.print("{s}{s}!", .{ "Hello", "World" });
+ try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
+ }
+}
+
test "FixedBufferStream output 2" {
var buffer: [10]u8 = undefined;
var fbs = fixedBufferStream(&buffer);
diff --git a/lib/std/io/multi_writer.zig b/lib/std/io/multi_writer.zig
index ae683848d0..b3f64f1553 100644
--- a/lib/std/io/multi_writer.zig
+++ b/lib/std/io/multi_writer.zig
@@ -6,7 +6,7 @@ const testing = std.testing;
pub fn MultiWriter(comptime Writers: type) type {
comptime var ErrSet = error{};
inline for (@typeInfo(Writers).Struct.fields) |field| {
- const StreamType = field.field_type;
+ const StreamType = field.type;
ErrSet = ErrSet || StreamType.Error;
}
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 17fc13b0dc..f16d70da80 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1362,7 +1362,7 @@ fn ParseInternalErrorImpl(comptime T: type, comptime inferred_types: []const typ
if (unionInfo.tag_type) |_| {
var errors = error{NoUnionMembersMatched};
for (unionInfo.fields) |u_field| {
- errors = errors || ParseInternalErrorImpl(u_field.field_type, inferred_types ++ [_]type{T});
+ errors = errors || ParseInternalErrorImpl(u_field.type, inferred_types ++ [_]type{T});
}
return errors;
} else {
@@ -1379,7 +1379,7 @@ fn ParseInternalErrorImpl(comptime T: type, comptime inferred_types: []const typ
MissingField,
} || SkipValueError || TokenStream.Error;
for (structInfo.fields) |field| {
- errors = errors || ParseInternalErrorImpl(field.field_type, inferred_types ++ [_]type{T});
+ errors = errors || ParseInternalErrorImpl(field.type, inferred_types ++ [_]type{T});
}
return errors;
},
@@ -1491,7 +1491,7 @@ fn parseInternal(
inline for (unionInfo.fields) |u_field| {
// take a copy of tokens so we can withhold mutations until success
var tokens_copy = tokens.*;
- if (parseInternal(u_field.field_type, token, &tokens_copy, options)) |value| {
+ if (parseInternal(u_field.type, token, &tokens_copy, options)) |value| {
tokens.* = tokens_copy;
return @unionInit(T, u_field.name, value);
} else |err| {
@@ -1519,7 +1519,7 @@ fn parseInternal(
errdefer {
inline for (structInfo.fields) |field, i| {
if (fields_seen[i] and !field.is_comptime) {
- parseFree(field.field_type, @field(r, field.name), options);
+ parseFree(field.type, @field(r, field.name), options);
}
}
}
@@ -1547,24 +1547,24 @@ fn parseInternal(
// }
if (options.duplicate_field_behavior == .UseFirst) {
// unconditonally ignore value. for comptime fields, this skips check against default_value
- parseFree(field.field_type, try parse(field.field_type, tokens, child_options), child_options);
+ parseFree(field.type, try parse(field.type, tokens, child_options), child_options);
found = true;
break;
} else if (options.duplicate_field_behavior == .Error) {
return error.DuplicateJSONField;
} else if (options.duplicate_field_behavior == .UseLast) {
if (!field.is_comptime) {
- parseFree(field.field_type, @field(r, field.name), child_options);
+ parseFree(field.type, @field(r, field.name), child_options);
}
fields_seen[i] = false;
}
}
if (field.is_comptime) {
- if (!try parsesTo(field.field_type, @ptrCast(*align(1) const field.field_type, field.default_value.?).*, tokens, child_options)) {
+ if (!try parsesTo(field.type, @ptrCast(*align(1) const field.type, field.default_value.?).*, tokens, child_options)) {
return error.UnexpectedValue;
}
} else {
- @field(r, field.name) = try parse(field.field_type, tokens, child_options);
+ @field(r, field.name) = try parse(field.type, tokens, child_options);
}
fields_seen[i] = true;
found = true;
@@ -1587,7 +1587,7 @@ fn parseInternal(
if (!fields_seen[i]) {
if (field.default_value) |default_ptr| {
if (!field.is_comptime) {
- const default = @ptrCast(*align(1) const field.field_type, default_ptr).*;
+ const default = @ptrCast(*align(1) const field.type, default_ptr).*;
@field(r, field.name) = default;
}
} else {
@@ -1732,7 +1732,7 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
if (unionInfo.tag_type) |UnionTagType| {
inline for (unionInfo.fields) |u_field| {
if (value == @field(UnionTagType, u_field.name)) {
- parseFree(u_field.field_type, @field(value, u_field.name), options);
+ parseFree(u_field.type, @field(value, u_field.name), options);
break;
}
}
@@ -1743,7 +1743,7 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
.Struct => |structInfo| {
inline for (structInfo.fields) |field| {
if (!field.is_comptime) {
- parseFree(field.field_type, @field(value, field.name), options);
+ parseFree(field.type, @field(value, field.name), options);
}
}
},
@@ -2270,12 +2270,12 @@ pub fn stringify(
}
inline for (S.fields) |Field| {
// don't include void fields
- if (Field.field_type == void) continue;
+ if (Field.type == void) continue;
var emit_field = true;
// don't include optional fields that are null when emit_null_optional_fields is set to false
- if (@typeInfo(Field.field_type) == .Optional) {
+ if (@typeInfo(Field.type) == .Optional) {
if (options.emit_null_optional_fields == false) {
if (@field(value, Field.name) == null) {
emit_field = false;
diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig
index 188f9d219a..bc5955d16a 100644
--- a/lib/std/leb128.zig
+++ b/lib/std/leb128.zig
@@ -15,11 +15,11 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T {
while (group < max_group) : (group += 1) {
const byte = try reader.readByte();
- var temp = @as(U, byte & 0x7f);
- if (@shlWithOverflow(U, temp, group * 7, &temp)) return error.Overflow;
+ const ov = @shlWithOverflow(@as(U, byte & 0x7f), group * 7);
+ if (ov[1] != 0) return error.Overflow;
- value |= temp;
+ value |= ov[0];
if (byte & 0x80 == 0) break;
} else {
return error.Overflow;
@@ -65,13 +65,13 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
while (group < max_group) : (group += 1) {
const byte = try reader.readByte();
- var temp = @as(U, byte & 0x7f);
const shift = group * 7;
- if (@shlWithOverflow(U, temp, shift, &temp)) {
+ const ov = @shlWithOverflow(@as(U, byte & 0x7f), shift);
+ if (ov[1] != 0) {
// Overflow is ok so long as the sign bit is set and this is the last byte
if (byte & 0x80 != 0) return error.Overflow;
- if (@bitCast(S, temp) >= 0) return error.Overflow;
+ if (@bitCast(S, ov[0]) >= 0) return error.Overflow;
// and all the overflowed bits are 1
const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
@@ -80,14 +80,14 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
} else {
// If we don't overflow and this is the last byte and the number being decoded
// is negative, check that the remaining bits are 1
- if ((byte & 0x80 == 0) and (@bitCast(S, temp) < 0)) {
+ if ((byte & 0x80 == 0) and (@bitCast(S, ov[0]) < 0)) {
const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
}
- value |= temp;
+ value |= ov[0];
if (byte & 0x80 == 0) {
const needs_sign_ext = group + 1 < max_group;
if (byte & 0x40 != 0 and needs_sign_ext) {
@@ -347,11 +347,6 @@ fn test_write_leb128(value: anytype) !void {
}
test "serialize unsigned LEB128" {
- if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .riscv64) {
- // https://github.com/ziglang/zig/issues/12031
- return error.SkipZigTest;
- }
-
const max_bits = 18;
comptime var t = 0;
@@ -366,11 +361,6 @@ test "serialize unsigned LEB128" {
}
test "serialize signed LEB128" {
- if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .riscv64) {
- // https://github.com/ziglang/zig/issues/12031
- return error.SkipZigTest;
- }
-
// explicitly test i0 because starting `t` at 0
// will break the while loop
try test_write_leb128(@as(i0, 0));
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 9ebe85c004..1d1b514c03 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -29,9 +29,9 @@
//! args: anytype,
//! ) void {
//! // Ignore all non-error logging from sources other than
-//! // .my_project, .nice_library and .default
+//! // .my_project, .nice_library and the default
//! const scope_prefix = "(" ++ switch (scope) {
-//! .my_project, .nice_library, .default => @tagName(scope),
+//! .my_project, .nice_library, std.log.default_log_scope => @tagName(scope),
//! else => if (@enumToInt(level) <= @enumToInt(std.log.Level.err))
//! @tagName(scope)
//! else
@@ -125,22 +125,28 @@ fn log(
comptime format: []const u8,
args: anytype,
) void {
- const effective_log_level = blk: {
- inline for (scope_levels) |scope_level| {
- if (scope_level.scope == scope) break :blk scope_level.level;
- }
- break :blk level;
- };
+ if (comptime !logEnabled(message_level, scope)) return;
+
+ if (@hasDecl(root, "log")) {
+ if (@typeInfo(@TypeOf(root.log)) != .Fn)
+ @compileError("Expected root.log to be a function");
+ root.log(message_level, scope, format, args);
+ } else {
+ defaultLog(message_level, scope, format, args);
+ }
+}
- if (@enumToInt(message_level) <= @enumToInt(effective_log_level)) {
- if (@hasDecl(root, "log")) {
- if (@typeInfo(@TypeOf(root.log)) != .Fn)
- @compileError("Expected root.log to be a function");
- root.log(message_level, scope, format, args);
- } else {
- defaultLog(message_level, scope, format, args);
- }
+/// Determine if a specific log message level and scope combination are enabled for logging.
+pub fn logEnabled(comptime message_level: Level, comptime scope: @Type(.EnumLiteral)) bool {
+ inline for (scope_levels) |scope_level| {
+ if (scope_level.scope == scope) return @enumToInt(message_level) <= @enumToInt(scope_level.level);
}
+ return @enumToInt(message_level) <= @enumToInt(level);
+}
+
+/// Determine if a specific log message level using the default log scope is enabled for logging.
+pub fn defaultLogEnabled(comptime message_level: Level) bool {
+ return comptime logEnabled(message_level, default_log_scope);
}
/// The default implementation for root.log. root.log may forward log messages
@@ -210,8 +216,10 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
};
}
+pub const default_log_scope = .default;
+
/// The default scoped logging namespace.
-pub const default = scoped(.default);
+pub const default = scoped(default_log_scope);
/// Log an error message using the default scope. This log level is intended to
/// be used when something has gone wrong. This might be recoverable or might
diff --git a/lib/std/macho.zig b/lib/std/macho.zig
index cb1fca20b2..bb80e78a92 100644
--- a/lib/std/macho.zig
+++ b/lib/std/macho.zig
@@ -58,10 +58,10 @@ pub const uuid_command = extern struct {
cmd: LC = .UUID,
/// sizeof(struct uuid_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(uuid_command),
/// the 128-bit uuid
- uuid: [16]u8,
+ uuid: [16]u8 = undefined,
};
/// The version_min_command contains the min OS version on which this
@@ -71,7 +71,7 @@ pub const version_min_command = extern struct {
cmd: LC,
/// sizeof(struct version_min_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(version_min_command),
/// X.Y.Z is encoded in nibbles xxxx.yy.zz
version: u32,
@@ -87,7 +87,7 @@ pub const source_version_command = extern struct {
cmd: LC = .SOURCE_VERSION,
/// sizeof(source_version_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(source_version_command),
/// A.B.C.D.E packed as a24.b10.c10.d10.e10
version: u64,
@@ -155,13 +155,13 @@ pub const entry_point_command = extern struct {
cmd: LC = .MAIN,
/// sizeof(struct entry_point_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(entry_point_command),
/// file (__TEXT) offset of main()
- entryoff: u64,
+ entryoff: u64 = 0,
/// if not zero, initial stack size
- stacksize: u64,
+ stacksize: u64 = 0,
};
/// The symtab_command contains the offsets and sizes of the link-edit 4.3BSD
@@ -172,19 +172,19 @@ pub const symtab_command = extern struct {
cmd: LC = .SYMTAB,
/// sizeof(struct symtab_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(symtab_command),
/// symbol table offset
- symoff: u32,
+ symoff: u32 = 0,
/// number of symbol table entries
- nsyms: u32,
+ nsyms: u32 = 0,
/// string table offset
- stroff: u32,
+ stroff: u32 = 0,
/// string table size in bytes
- strsize: u32,
+ strsize: u32 = 0,
};
/// This is the second set of the symbolic information which is used to support
@@ -230,7 +230,7 @@ pub const dysymtab_command = extern struct {
cmd: LC = .DYSYMTAB,
/// sizeof(struct dysymtab_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(dysymtab_command),
// The symbols indicated by symoff and nsyms of the LC_SYMTAB load command
// are grouped into the following three groups:
@@ -247,22 +247,22 @@ pub const dysymtab_command = extern struct {
// table when this is a dynamically linked shared library file).
/// index of local symbols
- ilocalsym: u32,
+ ilocalsym: u32 = 0,
/// number of local symbols
- nlocalsym: u32,
+ nlocalsym: u32 = 0,
/// index to externally defined symbols
- iextdefsym: u32,
+ iextdefsym: u32 = 0,
/// number of externally defined symbols
- nextdefsym: u32,
+ nextdefsym: u32 = 0,
/// index to undefined symbols
- iundefsym: u32,
+ iundefsym: u32 = 0,
/// number of undefined symbols
- nundefsym: u32,
+ nundefsym: u32 = 0,
// For the for the dynamic binding process to find which module a symbol
// is defined in the table of contents is used (analogous to the ranlib
@@ -272,10 +272,10 @@ pub const dysymtab_command = extern struct {
// symbols are sorted by name and is use as the table of contents.
/// file offset to table of contents
- tocoff: u32,
+ tocoff: u32 = 0,
/// number of entries in table of contents
- ntoc: u32,
+ ntoc: u32 = 0,
// To support dynamic binding of "modules" (whole object files) the symbol
// table must reflect the modules that the file was created from. This is
@@ -286,10 +286,10 @@ pub const dysymtab_command = extern struct {
// contains one module so everything in the file belongs to the module.
/// file offset to module table
- modtaboff: u32,
+ modtaboff: u32 = 0,
/// number of module table entries
- nmodtab: u32,
+ nmodtab: u32 = 0,
// To support dynamic module binding the module structure for each module
// indicates the external references (defined and undefined) each module
@@ -300,10 +300,10 @@ pub const dysymtab_command = extern struct {
// undefined external symbols indicates the external references.
/// offset to referenced symbol table
- extrefsymoff: u32,
+ extrefsymoff: u32 = 0,
/// number of referenced symbol table entries
- nextrefsyms: u32,
+ nextrefsyms: u32 = 0,
// The sections that contain "symbol pointers" and "routine stubs" have
// indexes and (implied counts based on the size of the section and fixed
@@ -315,10 +315,10 @@ pub const dysymtab_command = extern struct {
// The indirect symbol table is ordered to match the entries in the section.
/// file offset to the indirect symbol table
- indirectsymoff: u32,
+ indirectsymoff: u32 = 0,
/// number of indirect symbol table entries
- nindirectsyms: u32,
+ nindirectsyms: u32 = 0,
// To support relocating an individual module in a library file quickly the
// external relocation entries for each module in the library need to be
@@ -347,20 +347,20 @@ pub const dysymtab_command = extern struct {
// remaining relocation entries must be local).
/// offset to external relocation entries
- extreloff: u32,
+ extreloff: u32 = 0,
/// number of external relocation entries
- nextrel: u32,
+ nextrel: u32 = 0,
// All the local relocation entries are grouped together (they are not
// grouped by their module since they are only used if the object is moved
// from it staticly link edited address).
/// offset to local relocation entries
- locreloff: u32,
+ locreloff: u32 = 0,
/// number of local relocation entries
- nlocrel: u32,
+ nlocrel: u32 = 0,
};
/// The linkedit_data_command contains the offsets and sizes of a blob
@@ -370,13 +370,13 @@ pub const linkedit_data_command = extern struct {
cmd: LC,
/// sizeof(struct linkedit_data_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(linkedit_data_command),
/// file offset of data in __LINKEDIT segment
- dataoff: u32,
+ dataoff: u32 = 0,
/// file size of data in __LINKEDIT segment
- datasize: u32,
+ datasize: u32 = 0,
};
/// The dyld_info_command contains the file offsets and sizes of
@@ -387,10 +387,10 @@ pub const linkedit_data_command = extern struct {
/// to interpret it.
pub const dyld_info_command = extern struct {
/// LC_DYLD_INFO or LC_DYLD_INFO_ONLY
- cmd: LC,
+ cmd: LC = .DYLD_INFO_ONLY,
/// sizeof(struct dyld_info_command)
- cmdsize: u32,
+ cmdsize: u32 = @sizeOf(dyld_info_command),
// Dyld rebases an image whenever dyld loads it at an address different
// from its preferred address. The rebase information is a stream
@@ -403,10 +403,10 @@ pub const dyld_info_command = extern struct {
// bytes.
/// file offset to rebase info
- rebase_off: u32,
+ rebase_off: u32 = 0,
/// size of rebase info
- rebase_size: u32,
+ rebase_size: u32 = 0,
// Dyld binds an image during the loading process, if the image
// requires any pointers to be initialized to symbols in other images.
@@ -420,10 +420,10 @@ pub const dyld_info_command = extern struct {
// encoded in a few bytes.
/// file offset to binding info
- bind_off: u32,
+ bind_off: u32 = 0,
/// size of binding info
- bind_size: u32,
+ bind_size: u32 = 0,
// Some C++ programs require dyld to unique symbols so that all
// images in the process use the same copy of some code/data.
@@ -440,10 +440,10 @@ pub const dyld_info_command = extern struct {
// and the call to operator new is then rebound.
/// file offset to weak binding info
- weak_bind_off: u32,
+ weak_bind_off: u32 = 0,
/// size of weak binding info
- weak_bind_size: u32,
+ weak_bind_size: u32 = 0,
// Some uses of external symbols do not need to be bound immediately.
// Instead they can be lazily bound on first use. The lazy_bind
@@ -457,10 +457,10 @@ pub const dyld_info_command = extern struct {
// to bind.
/// file offset to lazy binding info
- lazy_bind_off: u32,
+ lazy_bind_off: u32 = 0,
/// size of lazy binding info
- lazy_bind_size: u32,
+ lazy_bind_size: u32 = 0,
// The symbols exported by a dylib are encoded in a trie. This
// is a compact representation that factors out common prefixes.
@@ -494,10 +494,10 @@ pub const dyld_info_command = extern struct {
// edge points to.
/// file offset to lazy binding info
- export_off: u32,
+ export_off: u32 = 0,
/// size of lazy binding info
- export_size: u32,
+ export_size: u32 = 0,
};
/// A program that uses a dynamic linker contains a dylinker_command to identify
@@ -1912,3 +1912,188 @@ pub const LoadCommandIterator = struct {
return cmd;
}
};
+
+pub const compact_unwind_encoding_t = u32;
+
+// Relocatable object files: __LD,__compact_unwind
+
+pub const compact_unwind_entry = extern struct {
+ rangeStart: u64,
+ rangeLength: u32,
+ compactUnwindEncoding: u32,
+ personalityFunction: u64,
+ lsda: u64,
+};
+
+// Final linked images: __TEXT,__unwind_info
+// The __TEXT,__unwind_info section is laid out for an efficient two level lookup.
+// The header of the section contains a coarse index that maps function address
+// to the page (4096 byte block) containing the unwind info for that function.
+
+pub const UNWIND_SECTION_VERSION = 1;
+
+pub const unwind_info_section_header = extern struct {
+ /// UNWIND_SECTION_VERSION
+ version: u32 = UNWIND_SECTION_VERSION,
+ commonEncodingsArraySectionOffset: u32,
+ commonEncodingsArrayCount: u32,
+ personalityArraySectionOffset: u32,
+ personalityArrayCount: u32,
+ indexSectionOffset: u32,
+ indexCount: u32,
+ // compact_unwind_encoding_t[]
+ // uint32_t personalities[]
+ // unwind_info_section_header_index_entry[]
+ // unwind_info_section_header_lsda_index_entry[]
+};
+
+pub const unwind_info_section_header_index_entry = extern struct {
+ functionOffset: u32,
+
+ /// section offset to start of regular or compress page
+ secondLevelPagesSectionOffset: u32,
+
+ /// section offset to start of lsda_index array for this range
+ lsdaIndexArraySectionOffset: u32,
+};
+
+pub const unwind_info_section_header_lsda_index_entry = extern struct {
+ functionOffset: u32,
+ lsdaOffset: u32,
+};
+
+// There are two kinds of second level index pages: regular and compressed.
+// A compressed page can hold up to 1021 entries, but it cannot be used if
+// too many different encoding types are used. The regular page holds 511
+// entries.
+
+pub const unwind_info_regular_second_level_entry = extern struct {
+ functionOffset: u32,
+ encoding: compact_unwind_encoding_t,
+};
+
+pub const UNWIND_SECOND_LEVEL = enum(u32) {
+ REGULAR = 2,
+ COMPRESSED = 3,
+ _,
+};
+
+pub const unwind_info_regular_second_level_page_header = extern struct {
+ /// UNWIND_SECOND_LEVEL_REGULAR
+ kind: UNWIND_SECOND_LEVEL = .REGULAR,
+
+ entryPageOffset: u16,
+ entryCount: u16,
+ // entry array
+};
+
+pub const unwind_info_compressed_second_level_page_header = extern struct {
+ /// UNWIND_SECOND_LEVEL_COMPRESSED
+ kind: UNWIND_SECOND_LEVEL = .COMPRESSED,
+
+ entryPageOffset: u16,
+ entryCount: u16,
+ encodingsPageOffset: u16,
+ encodingsCount: u16,
+ // 32bit entry array
+ // encodings array
+};
+
+pub const UnwindInfoCompressedEntry = packed struct {
+ funcOffset: u24,
+ encodingIndex: u8,
+};
+
+// TODO add corresponding x86_64 tagged union
+pub const UnwindEncodingArm64 = union(enum) {
+ frame: Frame,
+ frameless: Frameless,
+ dwarf: Dwarf,
+
+ pub const Frame = packed struct {
+ x_reg_pairs: packed struct {
+ x19_x20: u1,
+ x21_x22: u1,
+ x23_x24: u1,
+ x25_x26: u1,
+ x27_x28: u1,
+ },
+ d_reg_pairs: packed struct {
+ d8_d9: u1,
+ d10_d11: u1,
+ d12_d13: u1,
+ d14_d15: u1,
+ },
+ unused: u15,
+ mode: Mode = .frame,
+ personality_index: u2,
+ has_lsda: u1,
+ start: u1,
+ };
+
+ pub const Frameless = packed struct {
+ unused: u12 = 0,
+ stack_size: u12,
+ mode: Mode = .frameless,
+ personality_index: u2,
+ has_lsda: u1,
+ start: u1,
+ };
+
+ pub const Dwarf = packed struct {
+ section_offset: u24,
+ mode: Mode = .dwarf,
+ personality_index: u2,
+ has_lsda: u1,
+ start: u1,
+ };
+
+ pub const Mode = enum(u4) {
+ frameless = 0x2,
+ dwarf = 0x3,
+ frame = 0x4,
+ _,
+ };
+
+ pub const mode_mask: u32 = 0x0F000000;
+
+ pub fn fromU32(enc: u32) !UnwindEncodingArm64 {
+ const m = (enc & mode_mask) >> 24;
+ return switch (@intToEnum(Mode, m)) {
+ .frame => .{ .frame = @bitCast(Frame, enc) },
+ .frameless => .{ .frameless = @bitCast(Frameless, enc) },
+ .dwarf => .{ .dwarf = @bitCast(Dwarf, enc) },
+ else => return error.UnknownEncoding,
+ };
+ }
+
+ pub fn toU32(enc: UnwindEncodingArm64) u32 {
+ return switch (enc) {
+ inline else => |x| @bitCast(u32, x),
+ };
+ }
+
+ pub fn start(enc: UnwindEncodingArm64) bool {
+ return switch (enc) {
+ inline else => |x| x.start == 0b1,
+ };
+ }
+
+ pub fn hasLsda(enc: UnwindEncodingArm64) bool {
+ return switch (enc) {
+ inline else => |x| x.has_lsda == 0b1,
+ };
+ }
+
+ pub fn personalityIndex(enc: UnwindEncodingArm64) u2 {
+ return switch (enc) {
+ inline else => |x| x.personality_index,
+ };
+ }
+
+ pub fn mode(enc: UnwindEncodingArm64) Mode {
+ return switch (enc) {
+ inline else => |x| x.mode,
+ };
+ }
+};
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 58b6585884..c8eff2362e 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -468,21 +468,26 @@ test "clamp" {
/// Returns the product of a and b. Returns an error on overflow.
pub fn mul(comptime T: type, a: T, b: T) (error{Overflow}!T) {
- var answer: T = undefined;
- return if (@mulWithOverflow(T, a, b, &answer)) error.Overflow else answer;
+ if (T == comptime_int) return a * b;
+ const ov = @mulWithOverflow(a, b);
+ if (ov[1] != 0) return error.Overflow;
+ return ov[0];
}
/// Returns the sum of a and b. Returns an error on overflow.
pub fn add(comptime T: type, a: T, b: T) (error{Overflow}!T) {
if (T == comptime_int) return a + b;
- var answer: T = undefined;
- return if (@addWithOverflow(T, a, b, &answer)) error.Overflow else answer;
+ const ov = @addWithOverflow(a, b);
+ if (ov[1] != 0) return error.Overflow;
+ return ov[0];
}
/// Returns a - b, or an error on overflow.
pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) {
- var answer: T = undefined;
- return if (@subWithOverflow(T, a, b, &answer)) error.Overflow else answer;
+ if (T == comptime_int) return a - b;
+ const ov = @subWithOverflow(a, b);
+ if (ov[1] != 0) return error.Overflow;
+ return ov[0];
}
pub fn negate(x: anytype) !@TypeOf(x) {
@@ -492,8 +497,10 @@ pub fn negate(x: anytype) !@TypeOf(x) {
/// Shifts a left by shift_amt. Returns an error on overflow. shift_amt
/// is unsigned.
pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
- var answer: T = undefined;
- return if (@shlWithOverflow(T, a, shift_amt, &answer)) error.Overflow else answer;
+ if (T == comptime_int) return a << shift_amt;
+ const ov = @shlWithOverflow(a, shift_amt);
+ if (ov[1] != 0) return error.Overflow;
+ return ov[0];
}
/// Shifts left. Overflowed bits are truncated.
@@ -523,6 +530,10 @@ pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
}
test "shl" {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/12012
+ return error.SkipZigTest;
+ }
try testing.expect(shl(u8, 0b11111111, @as(usize, 3)) == 0b11111000);
try testing.expect(shl(u8, 0b11111111, @as(usize, 8)) == 0);
try testing.expect(shl(u8, 0b11111111, @as(usize, 9)) == 0);
@@ -563,6 +574,10 @@ pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
}
test "shr" {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/12012
+ return error.SkipZigTest;
+ }
try testing.expect(shr(u8, 0b11111111, @as(usize, 3)) == 0b00011111);
try testing.expect(shr(u8, 0b11111111, @as(usize, 8)) == 0);
try testing.expect(shr(u8, 0b11111111, @as(usize, 9)) == 0);
@@ -604,6 +619,10 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T {
}
test "rotr" {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/12012
+ return error.SkipZigTest;
+ }
try testing.expect(rotr(u0, 0b0, @as(usize, 3)) == 0b0);
try testing.expect(rotr(u5, 0b00001, @as(usize, 0)) == 0b00001);
try testing.expect(rotr(u6, 0b000001, @as(usize, 7)) == 0b100000);
@@ -644,6 +663,10 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T {
}
test "rotl" {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/12012
+ return error.SkipZigTest;
+ }
try testing.expect(rotl(u0, 0b0, @as(usize, 3)) == 0b0);
try testing.expect(rotl(u5, 0b00001, @as(usize, 0)) == 0b00001);
try testing.expect(rotl(u6, 0b000001, @as(usize, 7)) == 0b000010);
@@ -761,15 +784,31 @@ fn testOverflow() !void {
/// See also: `absCast`
pub fn absInt(x: anytype) !@TypeOf(x) {
const T = @TypeOf(x);
- comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
- comptime assert(@typeInfo(T).Int.signedness == .signed); // must pass a signed integer to absInt
-
- if (x == minInt(T)) {
- return error.Overflow;
- } else {
- @setRuntimeSafety(false);
- return if (x < 0) -x else x;
- }
+ return switch (@typeInfo(T)) {
+ .Int => |info| {
+ comptime assert(info.signedness == .signed); // must pass a signed integer to absInt
+ if (x == minInt(T)) {
+ return error.Overflow;
+ } else {
+ @setRuntimeSafety(false);
+ return if (x < 0) -x else x;
+ }
+ },
+ .Vector => |vinfo| blk: {
+ switch (@typeInfo(vinfo.child)) {
+ .Int => |info| {
+ comptime assert(info.signedness == .signed); // must pass a signed integer to absInt
+ if (@reduce(.Or, x == @splat(vinfo.len, @as(vinfo.child, minInt(vinfo.child))))) {
+ return error.Overflow;
+ }
+ const zero = @splat(vinfo.len, @as(vinfo.child, 0));
+ break :blk @select(vinfo.child, x > zero, x, -x);
+ },
+ else => @compileError("Expected vector of ints, found " ++ @typeName(T)),
+ }
+ },
+ else => @compileError("Expected an int or vector, found " ++ @typeName(T)),
+ };
}
test "absInt" {
@@ -779,6 +818,10 @@ test "absInt" {
fn testAbsInt() !void {
try testing.expect((absInt(@as(i32, -10)) catch unreachable) == 10);
try testing.expect((absInt(@as(i32, 10)) catch unreachable) == 10);
+ try testing.expectEqual(@Vector(3, i32){ 10, 10, 0 }, (absInt(@Vector(3, i32){ -10, 10, 0 }) catch unreachable));
+
+ try testing.expectError(error.Overflow, absInt(@as(i32, minInt(i32))));
+ try testing.expectError(error.Overflow, absInt(@Vector(3, i32){ 10, -10, minInt(i32) }));
}
/// Divide numerator by denominator, rounding toward zero. Returns an
@@ -1437,6 +1480,19 @@ pub const CompareOperator = enum {
gt,
/// Not equal (`!=`)
neq,
+
+ /// Reverse the direction of the comparison.
+ /// Use when swapping the left and right hand operands.
+ pub fn reverse(op: CompareOperator) CompareOperator {
+ return switch (op) {
+ .lt => .gt,
+ .lte => .gte,
+ .gt => .lt,
+ .gte => .lte,
+ .eq => .eq,
+ .neq => .neq,
+ };
+ }
};
/// This function does the same thing as comparison operators, however the
@@ -1496,6 +1552,15 @@ test "order.compare" {
try testing.expect(order(1, 0).compare(.neq));
}
+test "compare.reverse" {
+ inline for (@typeInfo(CompareOperator).Enum.fields) |op_field| {
+ const op = @intToEnum(CompareOperator, op_field.value);
+ try testing.expect(compare(2, op, 3) == compare(3, op.reverse(), 2));
+ try testing.expect(compare(3, op, 3) == compare(3, op.reverse(), 3));
+ try testing.expect(compare(4, op, 3) == compare(3, op.reverse(), 4));
+ }
+}
+
/// Returns a mask of all ones if value is true,
/// and a mask of all zeroes if value is false.
/// Compiles to one instruction for register sized integers.
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index c301874451..d222d6913b 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -74,42 +74,40 @@ pub fn calcTwosCompLimbCount(bit_count: usize) usize {
/// a + b * c + *carry, sets carry to the overflow bits
pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
@setRuntimeSafety(debug_safety);
- var r1: Limb = undefined;
- // r1 = a + *carry
- const c1: Limb = @boolToInt(@addWithOverflow(Limb, a, carry.*, &r1));
+ // ov1[0] = a + *carry
+ const ov1 = @addWithOverflow(a, carry.*);
// r2 = b * c
const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
const r2 = @truncate(Limb, bc);
const c2 = @truncate(Limb, bc >> limb_bits);
- // r1 = r1 + r2
- const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1));
+ // ov2[0] = ov1[0] + r2
+ const ov2 = @addWithOverflow(ov1[0], r2);
// This never overflows, c1, c3 are either 0 or 1 and if both are 1 then
// c2 is at least <= maxInt(Limb) - 2.
- carry.* = c1 + c2 + c3;
+ carry.* = ov1[1] + c2 + ov2[1];
- return r1;
+ return ov2[0];
}
/// a - b * c - *carry, sets carry to the overflow bits
fn subMulLimbWithBorrow(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
- // r1 = a - *carry
- var r1: Limb = undefined;
- const c1: Limb = @boolToInt(@subWithOverflow(Limb, a, carry.*, &r1));
+ // ov1[0] = a - *carry
+ const ov1 = @subWithOverflow(a, carry.*);
// r2 = b * c
const bc = @as(DoubleLimb, std.math.mulWide(Limb, b, c));
const r2 = @truncate(Limb, bc);
const c2 = @truncate(Limb, bc >> limb_bits);
- // r1 = r1 - r2
- const c3: Limb = @boolToInt(@subWithOverflow(Limb, r1, r2, &r1));
- carry.* = c1 + c2 + c3;
+ // ov2[0] = ov1[0] - r2
+ const ov2 = @subWithOverflow(ov1[0], r2);
+ carry.* = ov1[1] + c2 + ov2[1];
- return r1;
+ return ov2[0];
}
/// Used to indicate either limit of a 2s-complement integer.
@@ -673,7 +671,9 @@ pub const Mutable = struct {
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
if (a.limbs.len == 1 and b.limbs.len == 1) {
- if (!@mulWithOverflow(Limb, a.limbs[0], b.limbs[0], &rma.limbs[0])) {
+ const ov = @mulWithOverflow(a.limbs[0], b.limbs[0]);
+ rma.limbs[0] = ov[0];
+ if (ov[1] == 0) {
rma.len = 1;
rma.positive = (a.positive == b.positive);
return;
@@ -1677,6 +1677,40 @@ pub const Mutable = struct {
y.shiftRight(y.toConst(), norm_shift);
}
+ /// If a is positive, this passes through to truncate.
+ /// If a is negative, then r is set to positive with the bit pattern ~(abs(a) - 1).
+ ///
+ /// Asserts `r` has enough storage to store the result.
+ /// The upper bound is `calcTwosCompLimbCount(bit_count)`.
+ pub fn convertToTwosComplement(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
+ if (a.positive) {
+ r.truncate(a, signedness, bit_count);
+ return;
+ }
+
+ const req_limbs = calcTwosCompLimbCount(bit_count);
+ if (req_limbs == 0 or a.eqZero()) {
+ r.set(0);
+ return;
+ }
+
+ const bit = @truncate(Log2Limb, bit_count - 1);
+ const signmask = @as(Limb, 1) << bit;
+ const mask = (signmask << 1) -% 1;
+
+ r.addScalar(a.abs(), -1);
+ if (req_limbs > r.len) {
+ mem.set(Limb, r.limbs[r.len..req_limbs], 0);
+ }
+
+ assert(r.limbs.len >= req_limbs);
+ r.len = req_limbs;
+
+ llnot(r.limbs[0..r.len]);
+ r.limbs[r.len - 1] &= mask;
+ r.normalize(r.len);
+ }
+
/// Truncate an integer to a number of bits, following 2s-complement semantics.
/// r may alias a.
///
@@ -1836,7 +1870,11 @@ pub const Mutable = struct {
bit_index += @bitSizeOf(Limb);
// 2's complement (bitwise not, then add carry bit)
- if (!positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb));
+ if (!positive) {
+ const ov = @addWithOverflow(~limb, carry);
+ limb = ov[0];
+ carry = ov[1];
+ }
x.limbs[limb_index] = limb;
}
@@ -1853,7 +1891,11 @@ pub const Mutable = struct {
};
// 2's complement (bitwise not, then add carry bit)
- if (!positive) assert(!@addWithOverflow(Limb, ~limb, carry, &limb));
+ if (!positive) {
+ const ov = @addWithOverflow(~limb, carry);
+ assert(ov[1] == 0);
+ limb = ov[0];
+ }
x.limbs[limb_index] = limb;
limb_index += 1;
@@ -2000,7 +2042,9 @@ pub const Const = struct {
// All but the most significant limb.
for (self.limbs[0 .. self.limbs.len - 1]) |limb| {
- carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &add_res));
+ const ov = @addWithOverflow(~limb, carry);
+ add_res = ov[0];
+ carry = ov[1];
sum += @popCount(add_res);
remaining_bits -= limb_bits; // Asserted not to undeflow by fitsInTwosComp
}
@@ -2294,7 +2338,11 @@ pub const Const = struct {
var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
// 2's complement (bitwise not, then add carry bit)
- if (!x.positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb));
+ if (!x.positive) {
+ const ov = @addWithOverflow(~limb, carry);
+ limb = ov[0];
+ carry = ov[1];
+ }
// Write one Limb of bits
mem.writePackedInt(Limb, bytes, bit_index + bit_offset, limb, endian);
@@ -2306,7 +2354,7 @@ pub const Const = struct {
var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
// 2's complement (bitwise not, then add carry bit)
- if (!x.positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb);
+ if (!x.positive) limb = ~limb +% carry;
// Write all remaining bits
mem.writeVarPackedInt(bytes, bit_index + bit_offset, bit_count - bit_index, limb, endian);
@@ -3360,14 +3408,17 @@ fn llaccum(comptime op: AccOp, r: []Limb, a: []const Limb) void {
var carry: Limb = 0;
while (i < a.len) : (i += 1) {
- var c: Limb = 0;
- c += @boolToInt(@addWithOverflow(Limb, r[i], a[i], &r[i]));
- c += @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i]));
- carry = c;
+ const ov1 = @addWithOverflow(r[i], a[i]);
+ r[i] = ov1[0];
+ const ov2 = @addWithOverflow(r[i], carry);
+ r[i] = ov2[0];
+ carry = @as(Limb, ov1[1]) + ov2[1];
}
while ((carry != 0) and i < r.len) : (i += 1) {
- carry = @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i]));
+ const ov = @addWithOverflow(r[i], carry);
+ r[i] = ov[0];
+ carry = ov[1];
}
}
@@ -3435,7 +3486,9 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool {
j = 0;
while ((carry != 0) and (j < a_hi.len)) : (j += 1) {
- carry = @boolToInt(@addWithOverflow(Limb, a_hi[j], carry, &a_hi[j]));
+ const ov = @addWithOverflow(a_hi[j], carry);
+ a_hi[j] = ov[0];
+ carry = ov[1];
}
return carry != 0;
@@ -3449,7 +3502,9 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool {
j = 0;
while ((borrow != 0) and (j < a_hi.len)) : (j += 1) {
- borrow = @boolToInt(@subWithOverflow(Limb, a_hi[j], borrow, &a_hi[j]));
+ const ov = @subWithOverflow(a_hi[j], borrow);
+ a_hi[j] = ov[0];
+ borrow = ov[1];
}
return borrow != 0;
@@ -3482,14 +3537,17 @@ fn llsubcarry(r: []Limb, a: []const Limb, b: []const Limb) Limb {
var borrow: Limb = 0;
while (i < b.len) : (i += 1) {
- var c: Limb = 0;
- c += @boolToInt(@subWithOverflow(Limb, a[i], b[i], &r[i]));
- c += @boolToInt(@subWithOverflow(Limb, r[i], borrow, &r[i]));
- borrow = c;
+ const ov1 = @subWithOverflow(a[i], b[i]);
+ r[i] = ov1[0];
+ const ov2 = @subWithOverflow(r[i], borrow);
+ r[i] = ov2[0];
+ borrow = @as(Limb, ov1[1]) + ov2[1];
}
while (i < a.len) : (i += 1) {
- borrow = @boolToInt(@subWithOverflow(Limb, a[i], borrow, &r[i]));
+ const ov = @subWithOverflow(a[i], borrow);
+ r[i] = ov[0];
+ borrow = ov[1];
}
return borrow;
@@ -3512,14 +3570,17 @@ fn lladdcarry(r: []Limb, a: []const Limb, b: []const Limb) Limb {
var carry: Limb = 0;
while (i < b.len) : (i += 1) {
- var c: Limb = 0;
- c += @boolToInt(@addWithOverflow(Limb, a[i], b[i], &r[i]));
- c += @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i]));
- carry = c;
+ const ov1 = @addWithOverflow(a[i], b[i]);
+ r[i] = ov1[0];
+ const ov2 = @addWithOverflow(r[i], carry);
+ r[i] = ov2[0];
+ carry = @as(Limb, ov1[1]) + ov2[1];
}
while (i < a.len) : (i += 1) {
- carry = @boolToInt(@addWithOverflow(Limb, a[i], carry, &r[i]));
+ const ov = @addWithOverflow(a[i], carry);
+ r[i] = ov[0];
+ carry = ov[1];
}
return carry;
@@ -3685,11 +3746,11 @@ fn llsignedor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_p
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
- var a_limb: Limb = undefined;
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
-
- r[i] = a_limb & ~b[i];
- r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ const ov1 = @subWithOverflow(a[i], a_borrow);
+ a_borrow = ov1[1];
+ const ov2 = @addWithOverflow(ov1[0] & ~b[i], r_carry);
+ r[i] = ov2[0];
+ r_carry = ov2[1];
}
// In order for r_carry to be nonzero at this point, ~b[i] would need to be
@@ -3702,7 +3763,9 @@ fn llsignedor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_p
// Note, if a_borrow is zero we do not need to compute anything for
// the higher limbs so we can early return here.
while (i < a.len and a_borrow == 1) : (i += 1) {
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
+ const ov = @subWithOverflow(a[i], a_borrow);
+ r[i] = ov[0];
+ a_borrow = ov[1];
}
assert(a_borrow == 0); // a was 0.
@@ -3721,11 +3784,11 @@ fn llsignedor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_p
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
- var b_limb: Limb = undefined;
- b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
-
- r[i] = ~a[i] & b_limb;
- r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ const ov1 = @subWithOverflow(b[i], b_borrow);
+ b_borrow = ov1[1];
+ const ov2 = @addWithOverflow(~a[i] & ov1[0], r_carry);
+ r[i] = ov2[0];
+ r_carry = ov2[1];
}
// b is at least 1, so this should never underflow.
@@ -3752,14 +3815,13 @@ fn llsignedor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_p
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
- var a_limb: Limb = undefined;
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
-
- var b_limb: Limb = undefined;
- b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
-
- r[i] = a_limb & b_limb;
- r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ const ov1 = @subWithOverflow(a[i], a_borrow);
+ a_borrow = ov1[1];
+ const ov2 = @subWithOverflow(b[i], b_borrow);
+ b_borrow = ov2[1];
+ const ov3 = @addWithOverflow(ov1[0] & ov2[0], r_carry);
+ r[i] = ov3[0];
+ r_carry = ov3[1];
}
// b is at least 1, so this should never underflow.
@@ -3811,9 +3873,9 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_
var a_borrow: u1 = 1;
while (i < b.len) : (i += 1) {
- var a_limb: Limb = undefined;
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
- r[i] = ~a_limb & b[i];
+ const ov = @subWithOverflow(a[i], a_borrow);
+ a_borrow = ov[1];
+ r[i] = ~ov[0] & b[i];
}
// With b = 0 we have ~(a - 1) & 0 = 0, so the upper bytes are zero.
@@ -3830,9 +3892,9 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_
var b_borrow: u1 = 1;
while (i < b.len) : (i += 1) {
- var a_limb: Limb = undefined;
- b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &a_limb));
- r[i] = a[i] & ~a_limb;
+ const ov = @subWithOverflow(b[i], b_borrow);
+ b_borrow = ov[1];
+ r[i] = a[i] & ~ov[0];
}
assert(b_borrow == 0); // b was 0
@@ -3855,14 +3917,13 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
- var a_limb: Limb = undefined;
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
-
- var b_limb: Limb = undefined;
- b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
-
- r[i] = a_limb | b_limb;
- r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ const ov1 = @subWithOverflow(a[i], a_borrow);
+ a_borrow = ov1[1];
+ const ov2 = @subWithOverflow(b[i], b_borrow);
+ b_borrow = ov2[1];
+ const ov3 = @addWithOverflow(ov1[0] | ov2[0], r_carry);
+ r[i] = ov3[0];
+ r_carry = ov3[1];
}
// b is at least 1, so this should never underflow.
@@ -3870,8 +3931,11 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_
// With b = 0 and b_borrow = 0 we get (-a - 1) | (-0 - 0) = (-a - 1) | 0 = -a - 1.
while (i < a.len) : (i += 1) {
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
- r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ const ov1 = @subWithOverflow(a[i], a_borrow);
+ a_borrow = ov1[1];
+ const ov2 = @addWithOverflow(ov1[0], r_carry);
+ r[i] = ov2[0];
+ r_carry = ov2[1];
}
assert(a_borrow == 0); // a was 0.
@@ -3917,19 +3981,21 @@ fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_
var r_carry = @boolToInt(a_positive != b_positive);
while (i < b.len) : (i += 1) {
- var a_limb: Limb = undefined;
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
-
- var b_limb: Limb = undefined;
- b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
-
- r[i] = a_limb ^ b_limb;
- r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ const ov1 = @subWithOverflow(a[i], a_borrow);
+ a_borrow = ov1[1];
+ const ov2 = @subWithOverflow(b[i], b_borrow);
+ b_borrow = ov2[1];
+ const ov3 = @addWithOverflow(ov1[0] ^ ov2[0], r_carry);
+ r[i] = ov3[0];
+ r_carry = ov3[1];
}
while (i < a.len) : (i += 1) {
- a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
- r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ const ov1 = @subWithOverflow(a[i], a_borrow);
+ a_borrow = ov1[1];
+ const ov2 = @addWithOverflow(ov1[0], r_carry);
+ r[i] = ov2[0];
+ r_carry = ov2[1];
}
// If both inputs don't share the same sign, an extra limb is required.
@@ -4021,7 +4087,9 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
llsquareBasecase(tmp2, tmp1[0..llnormalize(tmp1)]);
mem.swap([]Limb, &tmp1, &tmp2);
// Multiply by a
- if (@shlWithOverflow(u32, exp, 1, &exp)) {
+ const ov = @shlWithOverflow(exp, 1);
+ exp = ov[0];
+ if (ov[1] != 0) {
mem.set(Limb, tmp2, 0);
llmulacc(.add, null, tmp2, tmp1[0..llnormalize(tmp1)], a);
mem.swap([]Limb, &tmp1, &tmp2);
diff --git a/lib/std/math/powi.zig b/lib/std/math/powi.zig
index 2e615de5d5..d7d07985eb 100644
--- a/lib/std/math/powi.zig
+++ b/lib/std/math/powi.zig
@@ -70,22 +70,22 @@ pub fn powi(comptime T: type, x: T, y: T) (error{
while (exp > 1) {
if (exp & 1 == 1) {
- if (@mulWithOverflow(T, acc, base, &acc)) {
- return error.Overflow;
- }
+ const ov = @mulWithOverflow(acc, base);
+ if (ov[1] != 0) return error.Overflow;
+ acc = ov[0];
}
exp >>= 1;
- if (@mulWithOverflow(T, base, base, &base)) {
- return error.Overflow;
- }
+ const ov = @mulWithOverflow(base, base);
+ if (ov[1] != 0) return error.Overflow;
+ base = ov[0];
}
if (exp == 1) {
- if (@mulWithOverflow(T, acc, base, &acc)) {
- return error.Overflow;
- }
+ const ov = @mulWithOverflow(acc, base);
+ if (ov[1] != 0) return error.Overflow;
+ acc = ov[0];
}
return acc;
diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig
index 871cc58e47..e642f8a309 100644
--- a/lib/std/math/sqrt.zig
+++ b/lib/std/math/sqrt.zig
@@ -37,9 +37,12 @@ fn sqrt_int(comptime T: type, value: T) Sqrt(T) {
if (@typeInfo(T).Int.bits <= 2) {
return if (value == 0) 0 else 1; // shortcut for small number of bits to simplify general case
} else {
+ const bits = @typeInfo(T).Int.bits;
+ const max = math.maxInt(T);
+ const minustwo = (@as(T, 2) ^ max) + 1; // unsigned int cannot represent -2
var op = value;
var res: T = 0;
- var one: T = 1 << ((@typeInfo(T).Int.bits - 1) & -2); // highest power of four that fits into T
+ var one: T = 1 << ((bits - 1) & minustwo); // highest power of four that fits into T
// "one" starts at the highest power of four <= than the argument.
while (one > op) {
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index a6b7490dab..42b35281e0 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -300,7 +300,7 @@ pub fn zeroes(comptime T: type) T {
if (comptime meta.containerLayout(T) == .Extern) {
// The C language specification states that (global) unions
// should be zero initialized to the first named member.
- return @unionInit(T, info.fields[0].name, zeroes(info.fields[0].field_type));
+ return @unionInit(T, info.fields[0].name, zeroes(info.fields[0].type));
}
@compileError("Can't set a " ++ @typeName(T) ++ " to zero.");
@@ -435,7 +435,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T {
inline for (struct_info.fields) |field| {
if (field.default_value) |default_value_ptr| {
- const default_value = @ptrCast(*align(1) const field.field_type, default_value_ptr).*;
+ const default_value = @ptrCast(*align(1) const field.type, default_value_ptr).*;
@field(value, field.name) = default_value;
}
}
@@ -452,9 +452,9 @@ pub fn zeroInit(comptime T: type, init: anytype) T {
@compileError("Encountered an initializer for `" ++ field.name ++ "`, but it is not a field of " ++ @typeName(T));
}
- switch (@typeInfo(field.field_type)) {
+ switch (@typeInfo(field.type)) {
.Struct => {
- @field(value, field.name) = zeroInit(field.field_type, @field(init, field.name));
+ @field(value, field.name) = zeroInit(field.type, @field(init, field.name));
},
else => {
@field(value, field.name) = @field(init, field.name);
@@ -3291,7 +3291,7 @@ pub fn nativeToBig(comptime T: type, x: T) T {
/// - The delta required to align the pointer is not a multiple of the pointee's
/// type.
pub fn alignPointerOffset(ptr: anytype, align_to: usize) ?usize {
- assert(align_to != 0 and @popCount(align_to) == 1);
+ assert(isValidAlign(align_to));
const T = @TypeOf(ptr);
const info = @typeInfo(T);
@@ -3304,13 +3304,13 @@ pub fn alignPointerOffset(ptr: anytype, align_to: usize) ?usize {
// Calculate the aligned base address with an eye out for overflow.
const addr = @ptrToInt(ptr);
- var new_addr: usize = undefined;
- if (@addWithOverflow(usize, addr, align_to - 1, &new_addr)) return null;
- new_addr &= ~@as(usize, align_to - 1);
+ var ov = @addWithOverflow(addr, align_to - 1);
+ if (ov[1] != 0) return null;
+ ov[0] &= ~@as(usize, align_to - 1);
// The delta is expressed in terms of bytes, turn it into a number of child
// type elements.
- const delta = new_addr - addr;
+ const delta = ov[0] - addr;
const pointee_size = @sizeOf(info.Pointer.child);
if (delta % pointee_size != 0) return null;
return delta / pointee_size;
@@ -3751,6 +3751,7 @@ pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize {
/// The alignment must be a power of 2 and greater than 0.
/// Asserts that rounding up the address does not cause integer overflow.
pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T {
+ assert(isValidAlignGeneric(T, alignment));
return alignBackwardGeneric(T, addr + (alignment - 1), alignment);
}
@@ -3770,7 +3771,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
.Bool => doNotOptimizeAway(@boolToInt(val)),
.Int => {
const bits = t.Int.bits;
- if (bits <= max_gp_register_bits) {
+ if (bits <= max_gp_register_bits and builtin.zig_backend != .stage2_c) {
const val2 = @as(
std.meta.Int(t.Int.signedness, @max(8, std.math.ceilPowerOfTwoAssert(u16, bits))),
val,
@@ -3782,18 +3783,24 @@ pub fn doNotOptimizeAway(val: anytype) void {
} else doNotOptimizeAway(&val);
},
.Float => {
- if (t.Float.bits == 32 or t.Float.bits == 64) {
+ if ((t.Float.bits == 32 or t.Float.bits == 64) and builtin.zig_backend != .stage2_c) {
asm volatile (""
:
: [val] "rm" (val),
);
} else doNotOptimizeAway(&val);
},
- .Pointer => asm volatile (""
- :
- : [val] "m" (val),
- : "memory"
- ),
+ .Pointer => {
+ if (builtin.zig_backend == .stage2_c) {
+ doNotOptimizeAwayC(val);
+ } else {
+ asm volatile (""
+ :
+ : [val] "m" (val),
+ : "memory"
+ );
+ }
+ },
.Array => {
if (t.Array.len * @sizeOf(t.Array.child) <= 64) {
for (val) |v| doNotOptimizeAway(v);
@@ -3803,6 +3810,16 @@ pub fn doNotOptimizeAway(val: anytype) void {
}
}
+/// .stage2_c doesn't support asm blocks yet, so use volatile stores instead
+var deopt_target: if (builtin.zig_backend == .stage2_c) u8 else void = undefined;
+fn doNotOptimizeAwayC(ptr: anytype) void {
+ const dest = @ptrCast(*volatile u8, &deopt_target);
+ for (asBytes(ptr)) |b| {
+ dest.* = b;
+ }
+ dest.* = 0;
+}
+
test "doNotOptimizeAway" {
comptime doNotOptimizeAway("test");
@@ -3846,7 +3863,7 @@ test "alignForward" {
/// Round an address down to the previous (or current) aligned address.
/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
- if (@popCount(alignment) == 1)
+ if (isValidAlign(alignment))
return alignBackward(i, alignment);
assert(alignment != 0);
return i - @mod(i, alignment);
@@ -3861,7 +3878,7 @@ pub fn alignBackward(addr: usize, alignment: usize) usize {
/// Round an address down to the previous (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
- assert(@popCount(alignment) == 1);
+ assert(isValidAlignGeneric(T, alignment));
// 000010000 // example alignment
// 000001111 // subtract 1
// 111110000 // binary not
@@ -3871,11 +3888,17 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
/// Returns whether `alignment` is a valid alignment, meaning it is
/// a positive power of 2.
pub fn isValidAlign(alignment: usize) bool {
- return @popCount(alignment) == 1;
+ return isValidAlignGeneric(usize, alignment);
+}
+
+/// Returns whether `alignment` is a valid alignment, meaning it is
+/// a positive power of 2.
+pub fn isValidAlignGeneric(comptime T: type, alignment: T) bool {
+ return alignment > 0 and std.math.isPowerOfTwo(alignment);
}
pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool {
- if (@popCount(alignment) == 1)
+ if (isValidAlign(alignment))
return isAligned(i, alignment);
assert(alignment != 0);
return 0 == @mod(i, alignment);
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index 9dd1a97baf..e97edf1718 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -144,22 +144,6 @@ test "std.meta.stringToEnum" {
try testing.expect(null == stringToEnum(E1, "C"));
}
-/// Deprecated, use `@bitSizeOf()`.
-/// TODO Remove this after zig 0.10.0 is released.
-pub fn bitCount(comptime T: type) comptime_int {
- return switch (@typeInfo(T)) {
- .Bool => 1,
- .Int => |info| info.bits,
- .Float => |info| info.bits,
- else => @compileError("Expected bool, int or float type, found '" ++ @typeName(T) ++ "'"),
- };
-}
-
-test "std.meta.bitCount" {
- try testing.expect(bitCount(u8) == 8);
- try testing.expect(bitCount(f32) == 32);
-}
-
/// Returns the alignment of type T.
/// Note that if T is a pointer or function type the result is different than
/// the one returned by @alignOf(T).
@@ -387,16 +371,12 @@ test "std.meta.assumeSentinel" {
pub fn containerLayout(comptime T: type) Type.ContainerLayout {
return switch (@typeInfo(T)) {
.Struct => |info| info.layout,
- .Enum => |info| info.layout,
.Union => |info| info.layout,
- else => @compileError("Expected struct, enum or union type, found '" ++ @typeName(T) ++ "'"),
+ else => @compileError("expected struct or union type, found '" ++ @typeName(T) ++ "'"),
};
}
test "std.meta.containerLayout" {
- const E1 = enum {
- A,
- };
const S1 = struct {};
const S2 = packed struct {};
const S3 = extern struct {};
@@ -410,7 +390,6 @@ test "std.meta.containerLayout" {
a: u8,
};
- try testing.expect(containerLayout(E1) == .Auto);
try testing.expect(containerLayout(S1) == .Auto);
try testing.expect(containerLayout(S2) == .Packed);
try testing.expect(containerLayout(S3) == .Extern);
@@ -538,8 +517,8 @@ test "std.meta.fields" {
try testing.expect(mem.eql(u8, e2f[0].name, "A"));
try testing.expect(mem.eql(u8, sf[0].name, "a"));
try testing.expect(mem.eql(u8, uf[0].name, "a"));
- try testing.expect(comptime sf[0].field_type == u8);
- try testing.expect(comptime uf[0].field_type == u8);
+ try testing.expect(comptime sf[0].type == u8);
+ try testing.expect(comptime uf[0].type == u8);
}
pub fn fieldInfo(comptime T: type, comptime field: FieldEnum(T)) switch (@typeInfo(T)) {
@@ -573,8 +552,34 @@ test "std.meta.fieldInfo" {
try testing.expect(mem.eql(u8, e2f.name, "A"));
try testing.expect(mem.eql(u8, sf.name, "a"));
try testing.expect(mem.eql(u8, uf.name, "a"));
- try testing.expect(comptime sf.field_type == u8);
- try testing.expect(comptime uf.field_type == u8);
+ try testing.expect(comptime sf.type == u8);
+ try testing.expect(comptime uf.type == u8);
+}
+
+pub fn FieldType(comptime T: type, comptime field: FieldEnum(T)) type {
+ if (@typeInfo(T) != .Struct and @typeInfo(T) != .Union) {
+ @compileError("Expected struct or union, found '" ++ @typeName(T) ++ "'");
+ }
+
+ return fieldInfo(T, field).type;
+}
+
+test "std.meta.FieldType" {
+ const S = struct {
+ a: u8,
+ b: u16,
+ };
+
+ const U = union {
+ c: u32,
+ d: *const u8,
+ };
+
+ try testing.expect(FieldType(S, .a) == u8);
+ try testing.expect(FieldType(S, .b) == u16);
+
+ try testing.expect(FieldType(U, .c) == u32);
+ try testing.expect(FieldType(U, .d) == *const u8);
}
pub fn fieldNames(comptime T: type) *const [fields(T).len][]const u8 {
@@ -650,7 +655,6 @@ pub fn FieldEnum(comptime T: type) type {
if (field_infos.len == 0) {
return @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u0,
.fields = &.{},
.decls = &.{},
@@ -680,7 +684,6 @@ pub fn FieldEnum(comptime T: type) type {
}
return @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = std.math.IntFittingRange(0, field_infos.len - 1),
.fields = &enumFields,
.decls = &decls,
@@ -693,10 +696,6 @@ fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void {
// TODO: https://github.com/ziglang/zig/issues/7419
// testing.expectEqual(@typeInfo(expected).Enum, @typeInfo(actual).Enum);
try testing.expectEqual(
- @typeInfo(expected).Enum.layout,
- @typeInfo(actual).Enum.layout,
- );
- try testing.expectEqual(
@typeInfo(expected).Enum.tag_type,
@typeInfo(actual).Enum.tag_type,
);
@@ -756,7 +755,6 @@ pub fn DeclEnum(comptime T: type) type {
}
return @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = std.math.IntFittingRange(0, fieldInfos.len - 1),
.fields = &enumDecls,
.decls = &decls,
@@ -838,21 +836,25 @@ test "std.meta.activeTag" {
const TagPayloadType = TagPayload;
-///Given a tagged union type, and an enum, return the type of the union
-/// field corresponding to the enum tag.
-pub fn TagPayload(comptime U: type, comptime tag: Tag(U)) type {
+pub fn TagPayloadByName(comptime U: type, comptime tag_name: []const u8) type {
comptime debug.assert(trait.is(.Union)(U));
const info = @typeInfo(U).Union;
inline for (info.fields) |field_info| {
- if (comptime mem.eql(u8, field_info.name, @tagName(tag)))
- return field_info.field_type;
+ if (comptime mem.eql(u8, field_info.name, tag_name))
+ return field_info.type;
}
unreachable;
}
+/// Given a tagged union type, and an enum, return the type of the union field
+/// corresponding to the enum tag.
+pub fn TagPayload(comptime U: type, comptime tag: Tag(U)) type {
+ return TagPayloadByName(U, @tagName(tag));
+}
+
test "std.meta.TagPayload" {
const Event = union(enum) {
Moved: struct {
@@ -1100,9 +1102,9 @@ pub fn ArgsTuple(comptime Function: type) type {
if (function_info.is_var_args)
@compileError("Cannot create ArgsTuple for variadic function");
- var argument_field_list: [function_info.args.len]type = undefined;
- inline for (function_info.args) |arg, i| {
- const T = arg.arg_type.?;
+ var argument_field_list: [function_info.params.len]type = undefined;
+ inline for (function_info.params) |arg, i| {
+ const T = arg.type.?;
argument_field_list[i] = T;
}
@@ -1127,7 +1129,7 @@ fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
var num_buf: [128]u8 = undefined;
tuple_fields[i] = .{
.name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable,
- .field_type = T,
+ .type = T,
.default_value = null,
.is_comptime = false,
.alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0,
@@ -1162,8 +1164,8 @@ const TupleTester = struct {
@compileError("Argument count mismatch");
inline for (fields_list) |fld, i| {
- if (expected[i] != fld.field_type) {
- @compileError("Field " ++ fld.name ++ " expected to be type " ++ @typeName(expected[i]) ++ ", but was type " ++ @typeName(fld.field_type));
+ if (expected[i] != fld.type) {
+ @compileError("Field " ++ fld.name ++ " expected to be type " ++ @typeName(expected[i]) ++ ", but was type " ++ @typeName(fld.type));
}
}
}
diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig
index 34fb17eb0a..e50950018d 100644
--- a/lib/std/meta/trailer_flags.zig
+++ b/lib/std/meta/trailer_flags.zig
@@ -24,10 +24,10 @@ pub fn TrailerFlags(comptime Fields: type) type {
inline for (@typeInfo(Fields).Struct.fields) |struct_field, i| {
fields[i] = Type.StructField{
.name = struct_field.name,
- .field_type = ?struct_field.field_type,
- .default_value = &@as(?struct_field.field_type, null),
+ .type = ?struct_field.type,
+ .default_value = &@as(?struct_field.type, null),
.is_comptime = false,
- .alignment = @alignOf(?struct_field.field_type),
+ .alignment = @alignOf(?struct_field.type),
};
}
break :blk @Type(.{
@@ -105,26 +105,26 @@ pub fn TrailerFlags(comptime Fields: type) type {
const active = (self.bits & (1 << i)) != 0;
if (i == @enumToInt(field)) {
assert(active);
- return mem.alignForwardGeneric(usize, off, @alignOf(field_info.field_type));
+ return mem.alignForwardGeneric(usize, off, @alignOf(field_info.type));
} else if (active) {
- off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.field_type));
- off += @sizeOf(field_info.field_type);
+ off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.type));
+ off += @sizeOf(field_info.type);
}
}
}
pub fn Field(comptime field: FieldEnum) type {
- return @typeInfo(Fields).Struct.fields[@enumToInt(field)].field_type;
+ return @typeInfo(Fields).Struct.fields[@enumToInt(field)].type;
}
pub fn sizeInBytes(self: Self) usize {
var off: usize = 0;
inline for (@typeInfo(Fields).Struct.fields) |field, i| {
- if (@sizeOf(field.field_type) == 0)
+ if (@sizeOf(field.type) == 0)
continue;
if ((self.bits & (1 << i)) != 0) {
- off = mem.alignForwardGeneric(usize, off, @alignOf(field.field_type));
- off += @sizeOf(field.field_type);
+ off = mem.alignForwardGeneric(usize, off, @alignOf(field.type));
+ off += @sizeOf(field.type);
}
}
return off;
diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig
index 2e9c2165f7..843ada7f56 100644
--- a/lib/std/meta/trait.zig
+++ b/lib/std/meta/trait.zig
@@ -154,7 +154,6 @@ pub fn isExtern(comptime T: type) bool {
return switch (@typeInfo(T)) {
.Struct => |s| s.layout == .Extern,
.Union => |u| u.layout == .Extern,
- .Enum => |e| e.layout == .Extern,
else => false,
};
}
@@ -172,7 +171,6 @@ pub fn isPacked(comptime T: type) bool {
return switch (@typeInfo(T)) {
.Struct => |s| s.layout == .Packed,
.Union => |u| u.layout == .Packed,
- .Enum => |e| e.layout == .Packed,
else => false,
};
}
@@ -566,7 +564,7 @@ pub fn hasUniqueRepresentation(comptime T: type) bool {
var sum_size = @as(usize, 0);
inline for (info.fields) |field| {
- const FieldType = field.field_type;
+ const FieldType = field.type;
if (comptime !hasUniqueRepresentation(FieldType)) return false;
sum_size += @sizeOf(FieldType);
}
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index e05b87c0be..38c8276b9a 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -84,9 +84,9 @@ pub fn MultiArrayList(comptime S: type) type {
var data: [fields.len]Data = undefined;
for (fields) |field_info, i| {
data[i] = .{
- .size = @sizeOf(field_info.field_type),
+ .size = @sizeOf(field_info.type),
.size_index = i,
- .alignment = if (@sizeOf(field_info.field_type) == 0) 1 else field_info.alignment,
+ .alignment = if (@sizeOf(field_info.type) == 0) 1 else field_info.alignment,
};
}
const Sort = struct {
@@ -294,10 +294,10 @@ pub fn MultiArrayList(comptime S: type) type {
) catch {
const self_slice = self.slice();
inline for (fields) |field_info, i| {
- if (@sizeOf(field_info.field_type) != 0) {
+ if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
const dest_slice = self_slice.items(field)[new_len..];
- const byte_count = dest_slice.len * @sizeOf(field_info.field_type);
+ const byte_count = dest_slice.len * @sizeOf(field_info.type);
// We use memset here for more efficient codegen in safety-checked,
// valgrind-enabled builds. Otherwise the valgrind client request
// will be repeated for every element.
@@ -316,9 +316,9 @@ pub fn MultiArrayList(comptime S: type) type {
const self_slice = self.slice();
const other_slice = other.slice();
inline for (fields) |field_info, i| {
- if (@sizeOf(field_info.field_type) != 0) {
+ if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
- mem.copy(field_info.field_type, other_slice.items(field), self_slice.items(field));
+ mem.copy(field_info.type, other_slice.items(field), self_slice.items(field));
}
}
gpa.free(self.allocatedBytes());
@@ -377,9 +377,9 @@ pub fn MultiArrayList(comptime S: type) type {
const self_slice = self.slice();
const other_slice = other.slice();
inline for (fields) |field_info, i| {
- if (@sizeOf(field_info.field_type) != 0) {
+ if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
- mem.copy(field_info.field_type, other_slice.items(field), self_slice.items(field));
+ mem.copy(field_info.type, other_slice.items(field), self_slice.items(field));
}
}
gpa.free(self.allocatedBytes());
@@ -396,9 +396,9 @@ pub fn MultiArrayList(comptime S: type) type {
const self_slice = self.slice();
const result_slice = result.slice();
inline for (fields) |field_info, i| {
- if (@sizeOf(field_info.field_type) != 0) {
+ if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
- mem.copy(field_info.field_type, result_slice.items(field), self_slice.items(field));
+ mem.copy(field_info.type, result_slice.items(field), self_slice.items(field));
}
}
return result;
@@ -413,10 +413,10 @@ pub fn MultiArrayList(comptime S: type) type {
pub fn swap(sc: @This(), a_index: usize, b_index: usize) void {
inline for (fields) |field_info, i| {
- if (@sizeOf(field_info.field_type) != 0) {
+ if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
const ptr = sc.slice.items(field);
- mem.swap(field_info.field_type, &ptr[a_index], &ptr[b_index]);
+ mem.swap(field_info.type, &ptr[a_index], &ptr[b_index]);
}
}
}
@@ -449,7 +449,7 @@ pub fn MultiArrayList(comptime S: type) type {
}
fn FieldType(comptime field: Field) type {
- return meta.fieldInfo(S, field).field_type;
+ return meta.fieldInfo(S, field).type;
}
/// This function is used in tools/zig-gdb.py to fetch the child type to facilitate
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 6818b91d5a..aa51176184 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -321,11 +321,15 @@ pub const Ip6Address = extern struct {
if (scope_id) {
if (c >= '0' and c <= '9') {
const digit = c - '0';
- if (@mulWithOverflow(u32, result.sa.scope_id, 10, &result.sa.scope_id)) {
- return error.Overflow;
+ {
+ const ov = @mulWithOverflow(result.sa.scope_id, 10);
+ if (ov[1] != 0) return error.Overflow;
+ result.sa.scope_id = ov[0];
}
- if (@addWithOverflow(u32, result.sa.scope_id, digit, &result.sa.scope_id)) {
- return error.Overflow;
+ {
+ const ov = @addWithOverflow(result.sa.scope_id, digit);
+ if (ov[1] != 0) return error.Overflow;
+ result.sa.scope_id = ov[0];
}
} else {
return error.InvalidCharacter;
@@ -377,11 +381,15 @@ pub const Ip6Address = extern struct {
return result;
} else {
const digit = try std.fmt.charToDigit(c, 16);
- if (@mulWithOverflow(u16, x, 16, &x)) {
- return error.Overflow;
+ {
+ const ov = @mulWithOverflow(x, 16);
+ if (ov[1] != 0) return error.Overflow;
+ x = ov[0];
}
- if (@addWithOverflow(u16, x, digit, &x)) {
- return error.Overflow;
+ {
+ const ov = @addWithOverflow(x, digit);
+ if (ov[1] != 0) return error.Overflow;
+ x = ov[0];
}
saw_any_digits = true;
}
@@ -492,11 +500,15 @@ pub const Ip6Address = extern struct {
return result;
} else {
const digit = try std.fmt.charToDigit(c, 16);
- if (@mulWithOverflow(u16, x, 16, &x)) {
- return error.Overflow;
+ {
+ const ov = @mulWithOverflow(x, 16);
+ if (ov[1] != 0) return error.Overflow;
+ x = ov[0];
}
- if (@addWithOverflow(u16, x, digit, &x)) {
- return error.Overflow;
+ {
+ const ov = @addWithOverflow(x, digit);
+ if (ov[1] != 0) return error.Overflow;
+ x = ov[0];
}
saw_any_digits = true;
}
@@ -1660,6 +1672,40 @@ pub const Stream = struct {
}
}
+ pub fn readv(s: Stream, iovecs: []const os.iovec) ReadError!usize {
+ if (builtin.os.tag == .windows) {
+ // TODO improve this to use ReadFileScatter
+ if (iovecs.len == 0) return @as(usize, 0);
+ const first = iovecs[0];
+ return os.windows.ReadFile(s.handle, first.iov_base[0..first.iov_len], null, io.default_mode);
+ }
+
+ return os.readv(s.handle, iovecs);
+ }
+
+ /// Returns the number of bytes read. If the number read is smaller than
+ /// `buffer.len`, it means the stream reached the end. Reaching the end of
+ /// a stream is not an error condition.
+ pub fn readAll(s: Stream, buffer: []u8) ReadError!usize {
+ return readAtLeast(s, buffer, buffer.len);
+ }
+
+ /// Returns the number of bytes read, calling the underlying read function
+ /// the minimal number of times until the buffer has at least `len` bytes
+ /// filled. If the number read is less than `len` it means the stream
+ /// reached the end. Reaching the end of the stream is not an error
+ /// condition.
+ pub fn readAtLeast(s: Stream, buffer: []u8, len: usize) ReadError!usize {
+ assert(len <= buffer.len);
+ var index: usize = 0;
+ while (index < len) {
+ const amt = try s.read(buffer[index..]);
+ if (amt == 0) break;
+ index += amt;
+ }
+ return index;
+ }
+
/// TODO in evented I/O mode, this implementation incorrectly uses the event loop's
/// file system thread instead of non-blocking. It needs to be reworked to properly
/// use non-blocking I/O.
@@ -1675,6 +1721,13 @@ pub const Stream = struct {
}
}
+ pub fn writeAll(self: Stream, bytes: []const u8) WriteError!void {
+ var index: usize = 0;
+ while (index < bytes.len) {
+ index += try self.write(bytes[index..]);
+ }
+ }
+
/// See https://github.com/ziglang/zig/issues/7699
/// See equivalent function: `std.fs.File.writev`.
pub fn writev(self: Stream, iovecs: []const os.iovec_const) WriteError!usize {
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 3d0c7a6351..ffc294f0e6 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -767,6 +767,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
.ISDIR => return error.IsDir,
.NOBUFS => return error.SystemResources,
.NOMEM => return error.SystemResources,
+ .CONNRESET => return error.ConnectionResetByPeer,
else => |err| return unexpectedErrno(err),
}
}
@@ -1944,19 +1945,7 @@ pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 {
while (ptr[i] != 0) : (i += 1) {}
const this_value = ptr[value_start..i :0];
- const key_string_bytes = @intCast(u16, key_slice.len * 2);
- const key_string = windows.UNICODE_STRING{
- .Length = key_string_bytes,
- .MaximumLength = key_string_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(key)),
- };
- const this_key_string_bytes = @intCast(u16, this_key.len * 2);
- const this_key_string = windows.UNICODE_STRING{
- .Length = this_key_string_bytes,
- .MaximumLength = this_key_string_bytes,
- .Buffer = this_key.ptr,
- };
- if (windows.ntdll.RtlEqualUnicodeString(&key_string, &this_key_string, windows.TRUE) == windows.TRUE) {
+ if (windows.eqlIgnoreCaseWTF16(key_slice, this_key)) {
return this_value;
}
@@ -2708,6 +2697,8 @@ pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: u32) MakeDirErr
.NOSPC => return error.NoSpaceLeft,
.NOTDIR => return error.NotDir,
.ROFS => return error.ReadOnlyFileSystem,
+ // dragonfly: when dir_fd is unlinked from filesystem
+ .NOTCONN => return error.FileNotFound,
else => |err| return unexpectedErrno(err),
}
}
@@ -5110,6 +5101,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
.SUCCESS => {},
.BADF => return error.FileNotFound,
+ .NOSPC => return error.NameTooLong,
// TODO man pages for fcntl on macOS don't really tell you what
// errno values to expect when command is F.GETPATH...
else => |err| return unexpectedErrno(err),
@@ -5142,19 +5134,85 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
return target;
},
.freebsd => {
- comptime if (builtin.os.version_range.semver.max.order(.{ .major = 13, .minor = 0 }) == .lt)
- @compileError("querying for canonical path of a handle is unsupported on FreeBSD 12 and below");
-
- var kfile: system.kinfo_file = undefined;
- kfile.structsize = system.KINFO_FILE_SIZE;
- switch (errno(system.fcntl(fd, system.F.KINFO, @ptrToInt(&kfile)))) {
+ if (comptime builtin.os.version_range.semver.max.order(.{ .major = 13, .minor = 0 }) == .gt) {
+ var kfile: system.kinfo_file = undefined;
+ kfile.structsize = system.KINFO_FILE_SIZE;
+ switch (errno(system.fcntl(fd, system.F.KINFO, @ptrToInt(&kfile)))) {
+ .SUCCESS => {},
+ .BADF => return error.FileNotFound,
+ else => |err| return unexpectedErrno(err),
+ }
+ const len = mem.indexOfScalar(u8, &kfile.path, 0) orelse MAX_PATH_BYTES;
+ if (len == 0) return error.NameTooLong;
+ mem.copy(u8, out_buffer, kfile.path[0..len]);
+ return out_buffer[0..len];
+ } else {
+ // This fallback implementation reimplements libutil's `kinfo_getfile()`.
+ // The motivation is to avoid linking -lutil when building zig or general
+ // user executables.
+ var mib = [4]c_int{ CTL.KERN, KERN.PROC, KERN.PROC_FILEDESC, system.getpid() };
+ var len: usize = undefined;
+ sysctl(&mib, null, &len, null, 0) catch |err| switch (err) {
+ error.PermissionDenied => unreachable,
+ error.SystemResources => return error.SystemResources,
+ error.NameTooLong => unreachable,
+ error.UnknownName => unreachable,
+ else => return error.Unexpected,
+ };
+ len = len * 4 / 3;
+ const buf = std.heap.c_allocator.alloc(u8, len) catch return error.SystemResources;
+ defer std.heap.c_allocator.free(buf);
+ len = buf.len;
+ sysctl(&mib, &buf[0], &len, null, 0) catch |err| switch (err) {
+ error.PermissionDenied => unreachable,
+ error.SystemResources => return error.SystemResources,
+ error.NameTooLong => unreachable,
+ error.UnknownName => unreachable,
+ else => return error.Unexpected,
+ };
+ var i: usize = 0;
+ while (i < len) {
+ const kf: *align(1) system.kinfo_file = @ptrCast(*align(1) system.kinfo_file, &buf[i]);
+ if (kf.fd == fd) {
+ len = mem.indexOfScalar(u8, &kf.path, 0) orelse MAX_PATH_BYTES;
+ if (len == 0) return error.NameTooLong;
+ mem.copy(u8, out_buffer, kf.path[0..len]);
+ return out_buffer[0..len];
+ }
+ i += @intCast(usize, kf.structsize);
+ }
+ return error.InvalidHandle;
+ }
+ },
+ .dragonfly => {
+ if (comptime builtin.os.version_range.semver.max.order(.{ .major = 6, .minor = 0 }) == .lt) {
+ @compileError("querying for canonical path of a handle is unsupported on this host");
+ }
+ @memset(out_buffer, 0, MAX_PATH_BYTES);
+ switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
.SUCCESS => {},
.BADF => return error.FileNotFound,
+ .RANGE => return error.NameTooLong,
else => |err| return unexpectedErrno(err),
}
-
- const len = mem.indexOfScalar(u8, &kfile.path, 0) orelse MAX_PATH_BYTES;
- mem.copy(u8, out_buffer, kfile.path[0..len]);
+ const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES;
+ return out_buffer[0..len];
+ },
+ .netbsd => {
+ if (comptime builtin.os.version_range.semver.max.order(.{ .major = 10, .minor = 0 }) == .lt) {
+ @compileError("querying for canonical path of a handle is unsupported on this host");
+ }
+ @memset(out_buffer, 0, MAX_PATH_BYTES);
+ switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
+ .SUCCESS => {},
+ .ACCES => return error.AccessDenied,
+ .BADF => return error.FileNotFound,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .RANGE => return error.NameTooLong,
+ else => |err| return unexpectedErrno(err),
+ }
+ const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES;
return out_buffer[0..len];
},
else => @compileError("querying for canonical path of a handle is unsupported on this host"),
@@ -5628,11 +5686,11 @@ pub fn sendmsg(
/// The file descriptor of the sending socket.
sockfd: socket_t,
/// Message header and iovecs
- msg: msghdr_const,
+ msg: *const msghdr_const,
flags: u32,
) SendMsgError!usize {
while (true) {
- const rc = system.sendmsg(sockfd, @ptrCast(*const std.x.os.Socket.Message, &msg), @intCast(c_int, flags));
+ const rc = system.sendmsg(sockfd, msg, flags);
if (builtin.os.tag == .windows) {
if (rc == windows.ws2_32.SOCKET_ERROR) {
switch (windows.ws2_32.WSAGetLastError()) {
@@ -6540,6 +6598,8 @@ pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t {
}
},
.freebsd => {
+ if (comptime builtin.os.version_range.semver.max.order(.{ .major = 13, .minor = 0 }) == .lt)
+ @compileError("memfd_create is unavailable on FreeBSD < 13.0");
const rc = system.memfd_create(name, flags);
switch (errno(rc)) {
.SUCCESS => return rc,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 7f227ffec4..d9d5fb3204 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -1226,11 +1226,14 @@ pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noal
return syscall5(.getsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
}
-pub fn sendmsg(fd: i32, msg: *const std.x.os.Socket.Message, flags: c_int) usize {
+pub fn sendmsg(fd: i32, msg: *const msghdr_const, flags: u32) usize {
+ const fd_usize = @bitCast(usize, @as(isize, fd));
+ const msg_usize = @ptrToInt(msg);
if (native_arch == .x86) {
- return socketcall(SC.sendmsg, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)) });
+ return socketcall(SC.sendmsg, &[3]usize{ fd_usize, msg_usize, flags });
+ } else {
+ return syscall3(.sendmsg, fd_usize, msg_usize, flags);
}
- return syscall3(.sendmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)));
}
pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize {
@@ -1244,7 +1247,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
var size: i32 = 0;
const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
- if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(i32, size, @intCast(i32, iov.iov_len), &size)) {
+ if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @intCast(i32, iov.iov_len))[1] != 0) {
// batch-send all messages up to the current message
if (next_unsent < i) {
const batch_size = i - next_unsent;
@@ -1274,24 +1277,42 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
}
pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize {
+ const fd_usize = @bitCast(usize, @as(isize, fd));
+ const addr_usize = @ptrToInt(addr);
if (native_arch == .x86) {
- return socketcall(SC.connect, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), len });
+ return socketcall(SC.connect, &[3]usize{ fd_usize, addr_usize, len });
+ } else {
+ return syscall3(.connect, fd_usize, addr_usize, len);
}
- return syscall3(.connect, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), len);
}
-pub fn recvmsg(fd: i32, msg: *std.x.os.Socket.Message, flags: c_int) usize {
+pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
+ const fd_usize = @bitCast(usize, @as(isize, fd));
+ const msg_usize = @ptrToInt(msg);
if (native_arch == .x86) {
- return socketcall(SC.recvmsg, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)) });
+ return socketcall(SC.recvmsg, &[3]usize{ fd_usize, msg_usize, flags });
+ } else {
+ return syscall3(.recvmsg, fd_usize, msg_usize, flags);
}
- return syscall3(.recvmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)));
}
-pub fn recvfrom(fd: i32, noalias buf: [*]u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize {
+pub fn recvfrom(
+ fd: i32,
+ noalias buf: [*]u8,
+ len: usize,
+ flags: u32,
+ noalias addr: ?*sockaddr,
+ noalias alen: ?*socklen_t,
+) usize {
+ const fd_usize = @bitCast(usize, @as(isize, fd));
+ const buf_usize = @ptrToInt(buf);
+ const addr_usize = @ptrToInt(addr);
+ const alen_usize = @ptrToInt(alen);
if (native_arch == .x86) {
- return socketcall(SC.recvfrom, &[6]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen) });
+ return socketcall(SC.recvfrom, &[6]usize{ fd_usize, buf_usize, len, flags, addr_usize, alen_usize });
+ } else {
+ return syscall6(.recvfrom, fd_usize, buf_usize, len, flags, addr_usize, alen_usize);
}
- return syscall6(.recvfrom, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
}
pub fn shutdown(fd: i32, how: i32) usize {
@@ -3219,7 +3240,15 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 128;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ family: sa_family_t align(8),
+ padding: [SS_MAXSIZE - @sizeOf(sa_family_t)]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
/// IPv4 socket address
pub const in = extern struct {
diff --git a/lib/std/os/linux/seccomp.zig b/lib/std/os/linux/seccomp.zig
index fd002e7416..03a96633f8 100644
--- a/lib/std/os/linux/seccomp.zig
+++ b/lib/std/os/linux/seccomp.zig
@@ -6,16 +6,14 @@
//! isn't that useful for general-purpose applications, and so a mode that
//! utilizes user-supplied filters mode was added.
//!
-//! Seccomp filters are classic BPF programs, which means that all the
-//! information under `std.x.net.bpf` applies here as well. Conceptually, a
-//! seccomp program is attached to the kernel and is executed on each syscall.
-//! The "packet" being validated is the `data` structure, and the verdict is an
-//! action that the kernel performs on the calling process. The actions are
-//! variations on a "pass" or "fail" result, where a pass allows the syscall to
-//! continue and a fail blocks the syscall and returns some sort of error value.
-//! See the full list of actions under ::RET for more information. Finally, only
-//! word-sized, absolute loads (`ld [k]`) are supported to read from the `data`
-//! structure.
+//! Seccomp filters are classic BPF programs. Conceptually, a seccomp program
+//! is attached to the kernel and is executed on each syscall. The "packet"
+//! being validated is the `data` structure, and the verdict is an action that
+//! the kernel performs on the calling process. The actions are variations on a
+//! "pass" or "fail" result, where a pass allows the syscall to continue and a
+//! fail blocks the syscall and returns some sort of error value. See the full
+//! list of actions under ::RET for more information. Finally, only word-sized,
+//! absolute loads (`ld [k]`) are supported to read from the `data` structure.
//!
//! There are some issues with the filter API that have traditionally made
//! writing them a pain:
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 483cec29d1..ae4de77b76 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -515,7 +515,14 @@ test "argsAlloc" {
test "memfd_create" {
// memfd_create is only supported by linux and freebsd.
- if (native_os != .linux and native_os != .freebsd) return error.SkipZigTest;
+ switch (native_os) {
+ .linux => {},
+ .freebsd => {
+ if (comptime builtin.os.version_range.semver.max.order(.{ .major = 13, .minor = 0 }) == .lt)
+ return error.SkipZigTest;
+ },
+ else => return error.SkipZigTest,
+ }
const fd = std.os.memfd_create("test", 0) catch |err| switch (err) {
// Related: https://github.com/ziglang/zig/issues/4019
diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig
index 338c9ea50d..6df6442fe6 100644
--- a/lib/std/os/uefi/protocols/device_path_protocol.zig
+++ b/lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -81,7 +81,7 @@ pub const DevicePathProtocol = extern struct {
// Got the associated union type for self.type, now
// we need to initialize it and its subtype
if (self.type == enum_value) {
- var subtype = self.initSubtype(ufield.field_type);
+ var subtype = self.initSubtype(ufield.type);
if (subtype) |sb| {
// e.g. return .{ .Hardware = .{ .Pci = @ptrCast(...) } }
@@ -103,7 +103,7 @@ pub const DevicePathProtocol = extern struct {
if (self.subtype == tag_val) {
// e.g. expr = .{ .Pci = @ptrCast(...) }
- return @unionInit(TUnion, subtype.name, @ptrCast(subtype.field_type, self));
+ return @unionInit(TUnion, subtype.name, @ptrCast(subtype.type, self));
}
}
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 0bd695a029..2a4d0d9a9b 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -1493,9 +1493,9 @@ pub fn VirtualFree(lpAddress: ?LPVOID, dwSize: usize, dwFreeType: DWORD) void {
assert(kernel32.VirtualFree(lpAddress, dwSize, dwFreeType) != 0);
}
-pub const VirtualQuerryError = error{Unexpected};
+pub const VirtualQueryError = error{Unexpected};
-pub fn VirtualQuery(lpAddress: ?LPVOID, lpBuffer: PMEMORY_BASIC_INFORMATION, dwLength: SIZE_T) VirtualQuerryError!SIZE_T {
+pub fn VirtualQuery(lpAddress: ?LPVOID, lpBuffer: PMEMORY_BASIC_INFORMATION, dwLength: SIZE_T) VirtualQueryError!SIZE_T {
const rc = kernel32.VirtualQuery(lpAddress, lpBuffer, dwLength);
if (rc == 0) {
switch (kernel32.GetLastError()) {
@@ -1569,6 +1569,7 @@ pub const CreateProcessError = error{
AccessDenied,
InvalidName,
NameTooLong,
+ InvalidExe,
Unexpected,
};
@@ -1603,6 +1604,30 @@ pub fn CreateProcessW(
.INVALID_PARAMETER => unreachable,
.INVALID_NAME => return error.InvalidName,
.FILENAME_EXCED_RANGE => return error.NameTooLong,
+ // These are all the system errors that are mapped to ENOEXEC by
+ // the undocumented _dosmaperr (old CRT) or __acrt_errno_map_os_error
+ // (newer CRT) functions. Their code can be found in crt/src/dosmap.c (old SDK)
+ // or urt/misc/errno.cpp (newer SDK) in the Windows SDK.
+ .BAD_FORMAT,
+ .INVALID_STARTING_CODESEG, // MIN_EXEC_ERROR in errno.cpp
+ .INVALID_STACKSEG,
+ .INVALID_MODULETYPE,
+ .INVALID_EXE_SIGNATURE,
+ .EXE_MARKED_INVALID,
+ .BAD_EXE_FORMAT,
+ .ITERATED_DATA_EXCEEDS_64k,
+ .INVALID_MINALLOCSIZE,
+ .DYNLINK_FROM_INVALID_RING,
+ .IOPL_NOT_ENABLED,
+ .INVALID_SEGDPL,
+ .AUTODATASEG_EXCEEDS_64k,
+ .RING2SEG_MUST_BE_MOVABLE,
+ .RELOC_CHAIN_XEEDS_SEGLIM,
+ .INFLOOP_IN_RELOC_CHAIN, // MAX_EXEC_ERROR in errno.cpp
+ // This one is not mapped to ENOEXEC but it is possible, for example
+ // when calling CreateProcessW on a plain text file with a .exe extension
+ .EXE_MACHINE_TYPE_MISMATCH,
+ => return error.InvalidExe,
else => |err| return unexpectedError(err),
}
}
@@ -1751,16 +1776,26 @@ pub fn UnlockFile(
}
}
+/// This is a workaround for the C backend until zig has the ability to put
+/// C code in inline assembly.
+extern fn zig_x86_64_windows_teb() callconv(.C) *anyopaque;
+
pub fn teb() *TEB {
return switch (native_arch) {
.x86 => asm volatile (
\\ movl %%fs:0x18, %[ptr]
: [ptr] "=r" (-> *TEB),
),
- .x86_64 => asm volatile (
- \\ movq %%gs:0x30, %[ptr]
- : [ptr] "=r" (-> *TEB),
- ),
+ .x86_64 => blk: {
+ if (builtin.zig_backend == .stage2_c) {
+ break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_64_windows_teb()));
+ } else {
+ break :blk asm volatile (
+ \\ movq %%gs:0x30, %[ptr]
+ : [ptr] "=r" (-> *TEB),
+ );
+ }
+ },
.aarch64 => asm volatile (
\\ mov %[ptr], x18
: [ptr] "=r" (-> *TEB),
@@ -1802,6 +1837,23 @@ pub fn nanoSecondsToFileTime(ns: i128) FILETIME {
};
}
+/// Compares two WTF16 strings using RtlEqualUnicodeString
+pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
+ const a_bytes = @intCast(u16, a.len * 2);
+ const a_string = UNICODE_STRING{
+ .Length = a_bytes,
+ .MaximumLength = a_bytes,
+ .Buffer = @intToPtr([*]u16, @ptrToInt(a.ptr)),
+ };
+ const b_bytes = @intCast(u16, b.len * 2);
+ const b_string = UNICODE_STRING{
+ .Length = b_bytes,
+ .MaximumLength = b_bytes,
+ .Buffer = @intToPtr([*]u16, @ptrToInt(b.ptr)),
+ };
+ return ntdll.RtlEqualUnicodeString(&a_string, &b_string, TRUE) == TRUE;
+}
+
pub const PathSpace = struct {
data: [PATH_MAX_WIDE:0]u16,
len: usize,
@@ -3413,6 +3465,21 @@ pub const ASSEMBLY_STORAGE_MAP = opaque {};
pub const FLS_CALLBACK_INFO = opaque {};
pub const RTL_BITMAP = opaque {};
pub const KAFFINITY = usize;
+pub const KPRIORITY = i32;
+
+pub const CLIENT_ID = extern struct {
+ UniqueProcess: HANDLE,
+ UniqueThread: HANDLE,
+};
+
+pub const THREAD_BASIC_INFORMATION = extern struct {
+ ExitStatus: NTSTATUS,
+ TebBaseAddress: PVOID,
+ ClientId: CLIENT_ID,
+ AffinityMask: KAFFINITY,
+ Priority: KPRIORITY,
+ BasePriority: KPRIORITY,
+};
pub const TEB = extern struct {
Reserved1: [12]PVOID,
@@ -3660,6 +3727,20 @@ pub const RTL_DRIVE_LETTER_CURDIR = extern struct {
pub const PPS_POST_PROCESS_INIT_ROUTINE = ?*const fn () callconv(.C) void;
+pub const FILE_DIRECTORY_INFORMATION = extern struct {
+ NextEntryOffset: ULONG,
+ FileIndex: ULONG,
+ CreationTime: LARGE_INTEGER,
+ LastAccessTime: LARGE_INTEGER,
+ LastWriteTime: LARGE_INTEGER,
+ ChangeTime: LARGE_INTEGER,
+ EndOfFile: LARGE_INTEGER,
+ AllocationSize: LARGE_INTEGER,
+ FileAttributes: ULONG,
+ FileNameLength: ULONG,
+ FileName: [1]WCHAR,
+};
+
pub const FILE_BOTH_DIR_INFORMATION = extern struct {
NextEntryOffset: ULONG,
FileIndex: ULONG,
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index e0c7b96f84..e1cb7f333a 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -177,6 +177,8 @@ pub extern "kernel32" fn GetEnvironmentStringsW() callconv(WINAPI) ?[*:0]u16;
pub extern "kernel32" fn GetEnvironmentVariableW(lpName: LPWSTR, lpBuffer: [*]u16, nSize: DWORD) callconv(WINAPI) DWORD;
+pub extern "kernel32" fn SetEnvironmentVariableW(lpName: LPCWSTR, lpValue: ?LPCWSTR) callconv(WINAPI) BOOL;
+
pub extern "kernel32" fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) callconv(WINAPI) BOOL;
pub extern "kernel32" fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) callconv(WINAPI) BOOL;
diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig
index 90e1422fd2..b4d18264f3 100644
--- a/lib/std/os/windows/ws2_32.zig
+++ b/lib/std/os/windows/ws2_32.zig
@@ -1,4 +1,5 @@
const std = @import("../../std.zig");
+const assert = std.debug.assert;
const windows = std.os.windows;
const WINAPI = windows.WINAPI;
@@ -1106,7 +1107,15 @@ pub const sockaddr = extern struct {
data: [14]u8,
pub const SS_MAXSIZE = 128;
- pub const storage = std.x.os.Socket.Address.Native.Storage;
+ pub const storage = extern struct {
+ family: ADDRESS_FAMILY align(8),
+ padding: [SS_MAXSIZE - @sizeOf(ADDRESS_FAMILY)]u8 = undefined,
+
+ comptime {
+ assert(@sizeOf(storage) == SS_MAXSIZE);
+ assert(@alignOf(storage) == 8);
+ }
+ };
/// IPv4 socket address
pub const in = extern struct {
@@ -1207,7 +1216,7 @@ pub const LPFN_GETACCEPTEXSOCKADDRS = *const fn (
pub const LPFN_WSASENDMSG = *const fn (
s: SOCKET,
- lpMsg: *const std.x.os.Socket.Message,
+ lpMsg: *const WSAMSG_const,
dwFlags: u32,
lpNumberOfBytesSent: ?*u32,
lpOverlapped: ?*OVERLAPPED,
@@ -1216,7 +1225,7 @@ pub const LPFN_WSASENDMSG = *const fn (
pub const LPFN_WSARECVMSG = *const fn (
s: SOCKET,
- lpMsg: *std.x.os.Socket.Message,
+ lpMsg: *WSAMSG,
lpdwNumberOfBytesRecv: ?*u32,
lpOverlapped: ?*OVERLAPPED,
lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
@@ -2090,7 +2099,7 @@ pub extern "ws2_32" fn WSASend(
pub extern "ws2_32" fn WSASendMsg(
s: SOCKET,
- lpMsg: *const std.x.os.Socket.Message,
+ lpMsg: *WSAMSG_const,
dwFlags: u32,
lpNumberOfBytesSent: ?*u32,
lpOverlapped: ?*OVERLAPPED,
@@ -2099,7 +2108,7 @@ pub extern "ws2_32" fn WSASendMsg(
pub extern "ws2_32" fn WSARecvMsg(
s: SOCKET,
- lpMsg: *std.x.os.Socket.Message,
+ lpMsg: *WSAMSG,
lpdwNumberOfBytesRecv: ?*u32,
lpOverlapped: ?*OVERLAPPED,
lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig
index c2129381a5..f15ddbe974 100644
--- a/lib/std/packed_int_array.zig
+++ b/lib/std/packed_int_array.zig
@@ -208,6 +208,9 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim
/// The number of elements in the packed array.
comptime len: usize = int_count,
+ /// The integer type of the packed array.
+ pub const Child = Int;
+
/// Initialize a packed array using an unpacked array
/// or, more likely, an array literal.
pub fn init(ints: [int_count]Int) Self {
@@ -283,6 +286,9 @@ pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type {
bit_offset: u3,
len: usize,
+ /// The integer type of the packed slice.
+ pub const Child = Int;
+
/// Calculates the number of bytes required to store a desired count
/// of `Int`s.
pub fn bytesRequired(int_count: usize) usize {
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 23fad9e8c6..bcd0d5bfa9 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -307,7 +307,7 @@ pub fn getEnvMap(allocator: Allocator) !EnvMap {
const pair = mem.sliceTo(env, 0);
var parts = mem.split(u8, pair, "=");
const key = parts.first();
- const value = parts.next().?;
+ const value = parts.rest();
try result.put(key, value);
}
return result;
@@ -1023,8 +1023,16 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
+ {
+ const ov = @mulWithOverflow(uid, 10);
+ if (ov[1] != 0) return error.CorruptPasswordFile;
+ uid = ov[0];
+ }
+ {
+ const ov = @addWithOverflow(uid, digit);
+ if (ov[1] != 0) return error.CorruptPasswordFile;
+ uid = ov[0];
+ }
},
},
.ReadGroupId => switch (byte) {
@@ -1039,8 +1047,16 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
+ {
+ const ov = @mulWithOverflow(gid, 10);
+ if (ov[1] != 0) return error.CorruptPasswordFile;
+ gid = ov[0];
+ }
+ {
+ const ov = @addWithOverflow(gid, digit);
+ if (ov[1] != 0) return error.CorruptPasswordFile;
+ gid = ov[0];
+ }
},
},
}
diff --git a/lib/std/rand/Xoroshiro128.zig b/lib/std/rand/Xoroshiro128.zig
index 60907a017b..6ddd2eb89e 100644
--- a/lib/std/rand/Xoroshiro128.zig
+++ b/lib/std/rand/Xoroshiro128.zig
@@ -20,7 +20,7 @@ pub fn random(self: *Xoroshiro128) Random {
return Random.init(self, fill);
}
-fn next(self: *Xoroshiro128) u64 {
+pub fn next(self: *Xoroshiro128) u64 {
const s0 = self.s[0];
var s1 = self.s[1];
const r = s0 +% s1;
@@ -33,7 +33,7 @@ fn next(self: *Xoroshiro128) u64 {
}
// Skip 2^64 places ahead in the sequence
-fn jump(self: *Xoroshiro128) void {
+pub fn jump(self: *Xoroshiro128) void {
var s0: u64 = 0;
var s1: u64 = 0;
diff --git a/lib/std/rand/Xoshiro256.zig b/lib/std/rand/Xoshiro256.zig
index b81b5178aa..42ad43c445 100644
--- a/lib/std/rand/Xoshiro256.zig
+++ b/lib/std/rand/Xoshiro256.zig
@@ -22,7 +22,7 @@ pub fn random(self: *Xoshiro256) Random {
return Random.init(self, fill);
}
-fn next(self: *Xoshiro256) u64 {
+pub fn next(self: *Xoshiro256) u64 {
const r = math.rotl(u64, self.s[0] +% self.s[3], 23) +% self.s[0];
const t = self.s[1] << 17;
@@ -40,7 +40,7 @@ fn next(self: *Xoshiro256) u64 {
}
// Skip 2^128 places ahead in the sequence
-fn jump(self: *Xoshiro256) void {
+pub fn jump(self: *Xoshiro256) void {
var s: u256 = 0;
var table: u256 = 0x39abdc4529b1661ca9582618e03fc9aad5a61266f0c9392c180ec6d33cfd0aba;
diff --git a/lib/std/simd.zig b/lib/std/simd.zig
index 2179cb496e..95de3cc11c 100644
--- a/lib/std/simd.zig
+++ b/lib/std/simd.zig
@@ -191,6 +191,10 @@ pub fn extract(
}
test "vector patterns" {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/12012
+ return error.SkipZigTest;
+ }
const base = @Vector(4, u32){ 10, 20, 30, 40 };
const other_base = @Vector(4, u32){ 55, 66, 77, 88 };
diff --git a/lib/std/start.zig b/lib/std/start.zig
index b8c74a6373..dcc0b7427a 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -634,7 +634,7 @@ pub fn callMain() u8 {
}
pub fn call_wWinMain() std.os.windows.INT {
- const MAIN_HINSTANCE = @typeInfo(@TypeOf(root.wWinMain)).Fn.args[0].arg_type.?;
+ const MAIN_HINSTANCE = @typeInfo(@TypeOf(root.wWinMain)).Fn.params[0].type.?;
const hInstance = @ptrCast(MAIN_HINSTANCE, std.os.windows.kernel32.GetModuleHandleW(null).?);
const lpCmdLine = std.os.windows.kernel32.GetCommandLineW();
diff --git a/lib/std/std.zig b/lib/std/std.zig
index 05474ac6d5..1cbcd6bad7 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -41,7 +41,8 @@ pub const TailQueue = @import("linked_list.zig").TailQueue;
pub const Target = @import("target.zig").Target;
pub const Thread = @import("Thread.zig");
pub const Treap = @import("treap.zig").Treap;
-pub const Tz = @import("tz.zig").Tz;
+pub const Tz = tz.Tz;
+pub const Url = @import("Url.zig");
pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig");
@@ -86,10 +87,10 @@ pub const simd = @import("simd.zig");
pub const ascii = @import("ascii.zig");
pub const testing = @import("testing.zig");
pub const time = @import("time.zig");
+pub const tz = @import("tz.zig");
pub const unicode = @import("unicode.zig");
pub const valgrind = @import("valgrind.zig");
pub const wasm = @import("wasm.zig");
-pub const x = @import("x.zig");
pub const zig = @import("zig.zig");
pub const start = @import("start.zig");
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 49a7bd1c7d..69acfadd9e 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -273,7 +273,7 @@ pub const Target = struct {
.freebsd => return .{
.semver = Version.Range{
.min = .{ .major = 12, .minor = 0 },
- .max = .{ .major = 13, .minor = 0 },
+ .max = .{ .major = 13, .minor = 1 },
},
},
.macos => return switch (arch) {
@@ -312,19 +312,19 @@ pub const Target = struct {
.netbsd => return .{
.semver = .{
.min = .{ .major = 8, .minor = 0 },
- .max = .{ .major = 9, .minor = 1 },
+ .max = .{ .major = 10, .minor = 0 },
},
},
.openbsd => return .{
.semver = .{
.min = .{ .major = 6, .minor = 8 },
- .max = .{ .major = 6, .minor = 9 },
+ .max = .{ .major = 7, .minor = 2 },
},
},
.dragonfly => return .{
.semver = .{
.min = .{ .major = 5, .minor = 8 },
- .max = .{ .major = 6, .minor = 0 },
+ .max = .{ .major = 6, .minor = 4 },
},
},
.solaris => return .{
@@ -626,7 +626,7 @@ pub const Target = struct {
.raw => ".bin",
.plan9 => plan9Ext(cpu_arch),
.nvptx => ".ptx",
- .dxcontainer => @panic("TODO what's the extension for these?"),
+ .dxcontainer => ".dxil",
};
}
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index af0074a549..53877967c9 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -312,7 +312,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
const actual_window = actual[window_start..@min(actual.len, window_start + max_window_size)];
const actual_truncated = window_start + actual_window.len < actual.len;
- const ttyconf = std.debug.detectTTYConfig();
+ const ttyconf = std.debug.detectTTYConfig(std.io.getStdErr());
var differ = if (T == u8) BytesDiffer{
.expected = expected_window,
.actual = actual_window,
@@ -802,7 +802,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
const ArgsTuple = std.meta.ArgsTuple(@TypeOf(test_fn));
const fn_args_fields = @typeInfo(ArgsTuple).Struct.fields;
- if (fn_args_fields.len == 0 or fn_args_fields[0].field_type != std.mem.Allocator) {
+ if (fn_args_fields.len == 0 or fn_args_fields[0].type != std.mem.Allocator) {
@compileError("The provided function must have an " ++ @typeName(std.mem.Allocator) ++ " as its first argument");
}
const expected_args_tuple_len = fn_args_fields.len - 1;
diff --git a/lib/std/wasm.zig b/lib/std/wasm.zig
index 0e61b3fb30..2d519ace8b 100644
--- a/lib/std/wasm.zig
+++ b/lib/std/wasm.zig
@@ -215,7 +215,9 @@ test "Wasm - opcodes" {
}
/// Opcodes that require a prefix `0xFC`
-pub const PrefixedOpcode = enum(u8) {
+/// Each opcode represents a varuint32, meaning
+/// they are encoded as leb128 in binary.
+pub const PrefixedOpcode = enum(u32) {
i32_trunc_sat_f32_s = 0x00,
i32_trunc_sat_f32_u = 0x01,
i32_trunc_sat_f64_s = 0x02,
diff --git a/lib/std/x.zig b/lib/std/x.zig
deleted file mode 100644
index 64caf324ed..0000000000
--- a/lib/std/x.zig
+++ /dev/null
@@ -1,19 +0,0 @@
-const std = @import("std.zig");
-
-pub const os = struct {
- pub const Socket = @import("x/os/socket.zig").Socket;
- pub usingnamespace @import("x/os/io.zig");
- pub usingnamespace @import("x/os/net.zig");
-};
-
-pub const net = struct {
- pub const ip = @import("x/net/ip.zig");
- pub const tcp = @import("x/net/tcp.zig");
- pub const bpf = @import("x/net/bpf.zig");
-};
-
-test {
- inline for (.{ os, net }) |module| {
- std.testing.refAllDecls(module);
- }
-}
diff --git a/lib/std/x/net/bpf.zig b/lib/std/x/net/bpf.zig
deleted file mode 100644
index bee930c332..0000000000
--- a/lib/std/x/net/bpf.zig
+++ /dev/null
@@ -1,1003 +0,0 @@
-//! This package provides instrumentation for creating Berkeley Packet Filter[1]
-//! (BPF) programs, along with a simulator for running them.
-//!
-//! BPF is a mechanism for cheap, in-kernel packet filtering. Programs are
-//! attached to a network device and executed for every packet that flows
-//! through it. The program must then return a verdict: the amount of packet
-//! bytes that the kernel should copy into userspace. Execution speed is
-//! achieved by having programs run in a limited virtual machine, which has the
-//! added benefit of graceful failure in the face of buggy programs.
-//!
-//! The BPF virtual machine has a 32-bit word length and a small number of
-//! word-sized registers:
-//!
-//! - The accumulator, `a`: The source/destination of arithmetic and logic
-//! operations.
-//! - The index register, `x`: Used as an offset for indirect memory access and
-//! as a comparison value for conditional jumps.
-//! - The scratch memory store, `M[0]..M[15]`: Used for saving the value of a/x
-//! for later use.
-//!
-//! The packet being examined is an array of bytes, and is addressed using plain
-//! array subscript notation, e.g. [10] for the byte at offset 10. An implicit
-//! program counter, `pc`, is intialized to zero and incremented for each instruction.
-//!
-//! The machine has a fixed instruction set with the following form, where the
-//! numbers represent bit length:
-//!
-//! ```
-//! ┌───────────┬──────┬──────┐
-//! │ opcode:16 │ jt:8 │ jt:8 │
-//! ├───────────┴──────┴──────┤
-//! │ k:32 │
-//! └─────────────────────────┘
-//! ```
-//!
-//! The `opcode` indicates the instruction class and its addressing mode.
-//! Opcodes are generated by performing binary addition on the 8-bit class and
-//! mode constants. For example, the opcode for loading a byte from the packet
-//! at X + 2, (`ldb [x + 2]`), is:
-//!
-//! ```
-//! LD | IND | B = 0x00 | 0x40 | 0x20
-//! = 0x60
-//! ```
-//!
-//! `jt` is an offset used for conditional jumps, and increments the program
-//! counter by its amount if the comparison was true. Conversely, `jf`
-//! increments the counter if it was false. These fields are ignored in all
-//! other cases. `k` is a generic variable used for various purposes, most
-//! commonly as some sort of constant.
-//!
-//! This package contains opcode extensions used by different implementations,
-//! where "extension" is anything outside of the original that was imported into
-//! 4.4BSD[2]. These are marked with "EXTENSION", along with a list of
-//! implementations that use them.
-//!
-//! Most of the doc-comments use the BPF assembly syntax as described in the
-//! original paper[1]. For the sake of completeness, here is the complete
-//! instruction set, along with the extensions:
-//!
-//!```
-//! opcode addressing modes
-//! ld #k #len M[k] [k] [x + k]
-//! ldh [k] [x + k]
-//! ldb [k] [x + k]
-//! ldx #k #len M[k] 4 * ([k] & 0xf) arc4random()
-//! st M[k]
-//! stx M[k]
-//! jmp L
-//! jeq #k, Lt, Lf
-//! jgt #k, Lt, Lf
-//! jge #k, Lt, Lf
-//! jset #k, Lt, Lf
-//! add #k x
-//! sub #k x
-//! mul #k x
-//! div #k x
-//! or #k x
-//! and #k x
-//! lsh #k x
-//! rsh #k x
-//! neg #k x
-//! mod #k x
-//! xor #k x
-//! ret #k a
-//! tax
-//! txa
-//! ```
-//!
-//! Finally, a note on program design. The lack of backwards jumps leads to a
-//! "return early, return often" control flow. Take for example the program
-//! generated from the tcpdump filter `ip`:
-//!
-//! ```
-//! (000) ldh [12] ; Ethernet Packet Type
-//! (001) jeq #0x86dd, 2, 7 ; ETHERTYPE_IPV6
-//! (002) ldb [20] ; IPv6 Next Header
-//! (003) jeq #0x6, 10, 4 ; TCP
-//! (004) jeq #0x2c, 5, 11 ; IPv6 Fragment Header
-//! (005) ldb [54] ; TCP Source Port
-//! (006) jeq #0x6, 10, 11 ; IPPROTO_TCP
-//! (007) jeq #0x800, 8, 11 ; ETHERTYPE_IP
-//! (008) ldb [23] ; IPv4 Protocol
-//! (009) jeq #0x6, 10, 11 ; IPPROTO_TCP
-//! (010) ret #262144 ; copy 0x40000
-//! (011) ret #0 ; skip packet
-//! ```
-//!
-//! Here we can make a few observations:
-//!
-//! - The problem "filter only tcp packets" has essentially been transformed
-//! into a series of layer checks.
-//! - There are two distinct branches in the code, one for validating IPv4
-//! headers and one for IPv6 headers.
-//! - Most conditional jumps in these branches lead directly to the last two
-//! instructions, a pass or fail. Thus the goal of a program is to find the
-//! fastest route to a pass/fail comparison.
-//!
-//! [1]: S. McCanne and V. Jacobson, "The BSD Packet Filter: A New Architecture
-//! for User-level Packet Capture", Proceedings of the 1993 Winter USENIX.
-//! [2]: https://minnie.tuhs.org/cgi-bin/utree.pl?file=4.4BSD/usr/src/sys/net/bpf.h
-const std = @import("std");
-const builtin = @import("builtin");
-const native_endian = builtin.target.cpu.arch.endian();
-const mem = std.mem;
-const math = std.math;
-const random = std.crypto.random;
-const assert = std.debug.assert;
-const expectEqual = std.testing.expectEqual;
-const expectError = std.testing.expectError;
-const expect = std.testing.expect;
-
-// instruction classes
-/// ld, ldh, ldb: Load data into a.
-pub const LD = 0x00;
-/// ldx: Load data into x.
-pub const LDX = 0x01;
-/// st: Store into scratch memory the value of a.
-pub const ST = 0x02;
-/// st: Store into scratch memory the value of x.
-pub const STX = 0x03;
-/// alu: Wrapping arithmetic/bitwise operations on a using the value of k/x.
-pub const ALU = 0x04;
-/// jmp, jeq, jgt, je, jset: Increment the program counter based on a comparison
-/// between k/x and the accumulator.
-pub const JMP = 0x05;
-/// ret: Return a verdict using the value of k/the accumulator.
-pub const RET = 0x06;
-/// tax, txa: Register value copying between X and a.
-pub const MISC = 0x07;
-
-// Size of data to be loaded from the packet.
-/// ld: 32-bit full word.
-pub const W = 0x00;
-/// ldh: 16-bit half word.
-pub const H = 0x08;
-/// ldb: Single byte.
-pub const B = 0x10;
-
-// Addressing modes used for loads to a/x.
-/// #k: The immediate value stored in k.
-pub const IMM = 0x00;
-/// [k]: The value at offset k in the packet.
-pub const ABS = 0x20;
-/// [x + k]: The value at offset x + k in the packet.
-pub const IND = 0x40;
-/// M[k]: The value of the k'th scratch memory register.
-pub const MEM = 0x60;
-/// #len: The size of the packet.
-pub const LEN = 0x80;
-/// 4 * ([k] & 0xf): Four times the low four bits of the byte at offset k in the
-/// packet. This is used for efficiently loading the header length of an IP
-/// packet.
-pub const MSH = 0xa0;
-/// arc4random: 32-bit integer generated from a CPRNG (see arc4random(3)) loaded into a.
-/// EXTENSION. Defined for:
-/// - OpenBSD.
-pub const RND = 0xc0;
-
-// Modifiers for different instruction classes.
-/// Use the value of k for alu operations (add #k).
-/// Compare against the value of k for jumps (jeq #k, Lt, Lf).
-/// Return the value of k for returns (ret #k).
-pub const K = 0x00;
-/// Use the value of x for alu operations (add x).
-/// Compare against the value of X for jumps (jeq x, Lt, Lf).
-pub const X = 0x08;
-/// Return the value of a for returns (ret a).
-pub const A = 0x10;
-
-// ALU Operations on a using the value of k/x.
-// All arithmetic operations are defined to overflow the value of a.
-/// add: a = a + k
-/// a = a + x.
-pub const ADD = 0x00;
-/// sub: a = a - k
-/// a = a - x.
-pub const SUB = 0x10;
-/// mul: a = a * k
-/// a = a * x.
-pub const MUL = 0x20;
-/// div: a = a / k
-/// a = a / x.
-/// Truncated division.
-pub const DIV = 0x30;
-/// or: a = a | k
-/// a = a | x.
-pub const OR = 0x40;
-/// and: a = a & k
-/// a = a & x.
-pub const AND = 0x50;
-/// lsh: a = a << k
-/// a = a << x.
-/// a = a << k, a = a << x.
-pub const LSH = 0x60;
-/// rsh: a = a >> k
-/// a = a >> x.
-pub const RSH = 0x70;
-/// neg: a = -a.
-/// Note that this isn't a binary negation, rather the value of `~a + 1`.
-pub const NEG = 0x80;
-/// mod: a = a % k
-/// a = a % x.
-/// EXTENSION. Defined for:
-/// - Linux.
-/// - NetBSD + Minix 3.
-/// - FreeBSD and derivitives.
-pub const MOD = 0x90;
-/// xor: a = a ^ k
-/// a = a ^ x.
-/// EXTENSION. Defined for:
-/// - Linux.
-/// - NetBSD + Minix 3.
-/// - FreeBSD and derivitives.
-pub const XOR = 0xa0;
-
-// Jump operations using a comparison between a and x/k.
-/// jmp L: pc += k.
-/// No comparison done here.
-pub const JA = 0x00;
-/// jeq #k, Lt, Lf: pc += (a == k) ? jt : jf.
-/// jeq x, Lt, Lf: pc += (a == x) ? jt : jf.
-pub const JEQ = 0x10;
-/// jgt #k, Lt, Lf: pc += (a > k) ? jt : jf.
-/// jgt x, Lt, Lf: pc += (a > x) ? jt : jf.
-pub const JGT = 0x20;
-/// jge #k, Lt, Lf: pc += (a >= k) ? jt : jf.
-/// jge x, Lt, Lf: pc += (a >= x) ? jt : jf.
-pub const JGE = 0x30;
-/// jset #k, Lt, Lf: pc += (a & k > 0) ? jt : jf.
-/// jset x, Lt, Lf: pc += (a & x > 0) ? jt : jf.
-pub const JSET = 0x40;
-
-// Miscellaneous operations/register copy.
-/// tax: x = a.
-pub const TAX = 0x00;
-/// txa: a = x.
-pub const TXA = 0x80;
-
-/// The 16 registers in the scratch memory store as named enums.
-pub const Scratch = enum(u4) { m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15 };
-pub const MEMWORDS = 16;
-pub const MAXINSNS = switch (builtin.os.tag) {
- .linux => 4096,
- else => 512,
-};
-pub const MINBUFSIZE = 32;
-pub const MAXBUFSIZE = 1 << 21;
-
-pub const Insn = extern struct {
- opcode: u16,
- jt: u8,
- jf: u8,
- k: u32,
-
- /// Implements the `std.fmt.format` API.
- /// The formatting is similar to the output of tcpdump -dd.
- pub fn format(
- self: Insn,
- comptime layout: []const u8,
- opts: std.fmt.FormatOptions,
- writer: anytype,
- ) !void {
- _ = opts;
- if (layout.len != 0) std.fmt.invalidFmtError(layout, self);
-
- try std.fmt.format(
- writer,
- "Insn{{ 0x{X:0<2}, {d}, {d}, 0x{X:0<8} }}",
- .{ self.opcode, self.jt, self.jf, self.k },
- );
- }
-
- const Size = enum(u8) {
- word = W,
- half_word = H,
- byte = B,
- };
-
- fn stmt(opcode: u16, k: u32) Insn {
- return .{
- .opcode = opcode,
- .jt = 0,
- .jf = 0,
- .k = k,
- };
- }
-
- pub fn ld_imm(value: u32) Insn {
- return stmt(LD | IMM, value);
- }
-
- pub fn ld_abs(size: Size, offset: u32) Insn {
- return stmt(LD | ABS | @enumToInt(size), offset);
- }
-
- pub fn ld_ind(size: Size, offset: u32) Insn {
- return stmt(LD | IND | @enumToInt(size), offset);
- }
-
- pub fn ld_mem(reg: Scratch) Insn {
- return stmt(LD | MEM, @enumToInt(reg));
- }
-
- pub fn ld_len() Insn {
- return stmt(LD | LEN | W, 0);
- }
-
- pub fn ld_rnd() Insn {
- return stmt(LD | RND | W, 0);
- }
-
- pub fn ldx_imm(value: u32) Insn {
- return stmt(LDX | IMM, value);
- }
-
- pub fn ldx_mem(reg: Scratch) Insn {
- return stmt(LDX | MEM, @enumToInt(reg));
- }
-
- pub fn ldx_len() Insn {
- return stmt(LDX | LEN | W, 0);
- }
-
- pub fn ldx_msh(offset: u32) Insn {
- return stmt(LDX | MSH | B, offset);
- }
-
- pub fn st(reg: Scratch) Insn {
- return stmt(ST, @enumToInt(reg));
- }
- pub fn stx(reg: Scratch) Insn {
- return stmt(STX, @enumToInt(reg));
- }
-
- const AluOp = enum(u16) {
- add = ADD,
- sub = SUB,
- mul = MUL,
- div = DIV,
- @"or" = OR,
- @"and" = AND,
- lsh = LSH,
- rsh = RSH,
- mod = MOD,
- xor = XOR,
- };
-
- const Source = enum(u16) {
- k = K,
- x = X,
- };
- const KOrX = union(Source) {
- k: u32,
- x: void,
- };
-
- pub fn alu_neg() Insn {
- return stmt(ALU | NEG, 0);
- }
-
- pub fn alu(op: AluOp, source: KOrX) Insn {
- return stmt(
- ALU | @enumToInt(op) | @enumToInt(source),
- if (source == .k) source.k else 0,
- );
- }
-
- const JmpOp = enum(u16) {
- jeq = JEQ,
- jgt = JGT,
- jge = JGE,
- jset = JSET,
- };
-
- pub fn jmp_ja(location: u32) Insn {
- return stmt(JMP | JA, location);
- }
-
- pub fn jmp(op: JmpOp, source: KOrX, jt: u8, jf: u8) Insn {
- return Insn{
- .opcode = JMP | @enumToInt(op) | @enumToInt(source),
- .jt = jt,
- .jf = jf,
- .k = if (source == .k) source.k else 0,
- };
- }
-
- const Verdict = enum(u16) {
- k = K,
- a = A,
- };
- const KOrA = union(Verdict) {
- k: u32,
- a: void,
- };
-
- pub fn ret(verdict: KOrA) Insn {
- return stmt(
- RET | @enumToInt(verdict),
- if (verdict == .k) verdict.k else 0,
- );
- }
-
- pub fn tax() Insn {
- return stmt(MISC | TAX, 0);
- }
-
- pub fn txa() Insn {
- return stmt(MISC | TXA, 0);
- }
-};
-
-fn opcodeEqual(opcode: u16, insn: Insn) !void {
- try expectEqual(opcode, insn.opcode);
-}
-
-test "opcodes" {
- try opcodeEqual(0x00, Insn.ld_imm(0));
- try opcodeEqual(0x20, Insn.ld_abs(.word, 0));
- try opcodeEqual(0x28, Insn.ld_abs(.half_word, 0));
- try opcodeEqual(0x30, Insn.ld_abs(.byte, 0));
- try opcodeEqual(0x40, Insn.ld_ind(.word, 0));
- try opcodeEqual(0x48, Insn.ld_ind(.half_word, 0));
- try opcodeEqual(0x50, Insn.ld_ind(.byte, 0));
- try opcodeEqual(0x60, Insn.ld_mem(.m0));
- try opcodeEqual(0x80, Insn.ld_len());
- try opcodeEqual(0xc0, Insn.ld_rnd());
-
- try opcodeEqual(0x01, Insn.ldx_imm(0));
- try opcodeEqual(0x61, Insn.ldx_mem(.m0));
- try opcodeEqual(0x81, Insn.ldx_len());
- try opcodeEqual(0xb1, Insn.ldx_msh(0));
-
- try opcodeEqual(0x02, Insn.st(.m0));
- try opcodeEqual(0x03, Insn.stx(.m0));
-
- try opcodeEqual(0x04, Insn.alu(.add, .{ .k = 0 }));
- try opcodeEqual(0x14, Insn.alu(.sub, .{ .k = 0 }));
- try opcodeEqual(0x24, Insn.alu(.mul, .{ .k = 0 }));
- try opcodeEqual(0x34, Insn.alu(.div, .{ .k = 0 }));
- try opcodeEqual(0x44, Insn.alu(.@"or", .{ .k = 0 }));
- try opcodeEqual(0x54, Insn.alu(.@"and", .{ .k = 0 }));
- try opcodeEqual(0x64, Insn.alu(.lsh, .{ .k = 0 }));
- try opcodeEqual(0x74, Insn.alu(.rsh, .{ .k = 0 }));
- try opcodeEqual(0x94, Insn.alu(.mod, .{ .k = 0 }));
- try opcodeEqual(0xa4, Insn.alu(.xor, .{ .k = 0 }));
- try opcodeEqual(0x84, Insn.alu_neg());
- try opcodeEqual(0x0c, Insn.alu(.add, .x));
- try opcodeEqual(0x1c, Insn.alu(.sub, .x));
- try opcodeEqual(0x2c, Insn.alu(.mul, .x));
- try opcodeEqual(0x3c, Insn.alu(.div, .x));
- try opcodeEqual(0x4c, Insn.alu(.@"or", .x));
- try opcodeEqual(0x5c, Insn.alu(.@"and", .x));
- try opcodeEqual(0x6c, Insn.alu(.lsh, .x));
- try opcodeEqual(0x7c, Insn.alu(.rsh, .x));
- try opcodeEqual(0x9c, Insn.alu(.mod, .x));
- try opcodeEqual(0xac, Insn.alu(.xor, .x));
-
- try opcodeEqual(0x05, Insn.jmp_ja(0));
- try opcodeEqual(0x15, Insn.jmp(.jeq, .{ .k = 0 }, 0, 0));
- try opcodeEqual(0x25, Insn.jmp(.jgt, .{ .k = 0 }, 0, 0));
- try opcodeEqual(0x35, Insn.jmp(.jge, .{ .k = 0 }, 0, 0));
- try opcodeEqual(0x45, Insn.jmp(.jset, .{ .k = 0 }, 0, 0));
- try opcodeEqual(0x1d, Insn.jmp(.jeq, .x, 0, 0));
- try opcodeEqual(0x2d, Insn.jmp(.jgt, .x, 0, 0));
- try opcodeEqual(0x3d, Insn.jmp(.jge, .x, 0, 0));
- try opcodeEqual(0x4d, Insn.jmp(.jset, .x, 0, 0));
-
- try opcodeEqual(0x06, Insn.ret(.{ .k = 0 }));
- try opcodeEqual(0x16, Insn.ret(.a));
-
- try opcodeEqual(0x07, Insn.tax());
- try opcodeEqual(0x87, Insn.txa());
-}
-
-pub const Error = error{
- InvalidOpcode,
- InvalidOffset,
- InvalidLocation,
- DivisionByZero,
- NoReturn,
-};
-
-/// A simple implementation of the BPF virtual-machine.
-/// Use this to run/debug programs.
-pub fn simulate(
- packet: []const u8,
- filter: []const Insn,
- byte_order: std.builtin.Endian,
-) Error!u32 {
- assert(filter.len > 0 and filter.len < MAXINSNS);
- assert(packet.len < MAXBUFSIZE);
- const len = @intCast(u32, packet.len);
-
- var a: u32 = 0;
- var x: u32 = 0;
- var m = mem.zeroes([MEMWORDS]u32);
- var pc: usize = 0;
-
- while (pc < filter.len) : (pc += 1) {
- const i = filter[pc];
- // Cast to a wider type to protect against overflow.
- const k = @as(u64, i.k);
- const remaining = filter.len - (pc + 1);
-
- // Do validation/error checking here to compress the second switch.
- switch (i.opcode) {
- LD | ABS | W => if (k + @sizeOf(u32) - 1 >= packet.len) return error.InvalidOffset,
- LD | ABS | H => if (k + @sizeOf(u16) - 1 >= packet.len) return error.InvalidOffset,
- LD | ABS | B => if (k >= packet.len) return error.InvalidOffset,
- LD | IND | W => if (k + x + @sizeOf(u32) - 1 >= packet.len) return error.InvalidOffset,
- LD | IND | H => if (k + x + @sizeOf(u16) - 1 >= packet.len) return error.InvalidOffset,
- LD | IND | B => if (k + x >= packet.len) return error.InvalidOffset,
-
- LDX | MSH | B => if (k >= packet.len) return error.InvalidOffset,
- ST, STX, LD | MEM, LDX | MEM => if (i.k >= MEMWORDS) return error.InvalidOffset,
-
- JMP | JA => if (remaining <= i.k) return error.InvalidOffset,
- JMP | JEQ | K,
- JMP | JGT | K,
- JMP | JGE | K,
- JMP | JSET | K,
- JMP | JEQ | X,
- JMP | JGT | X,
- JMP | JGE | X,
- JMP | JSET | X,
- => if (remaining <= i.jt or remaining <= i.jf) return error.InvalidLocation,
- else => {},
- }
- switch (i.opcode) {
- LD | IMM => a = i.k,
- LD | MEM => a = m[i.k],
- LD | LEN | W => a = len,
- LD | RND | W => a = random.int(u32),
- LD | ABS | W => a = mem.readInt(u32, packet[i.k..][0..@sizeOf(u32)], byte_order),
- LD | ABS | H => a = mem.readInt(u16, packet[i.k..][0..@sizeOf(u16)], byte_order),
- LD | ABS | B => a = packet[i.k],
- LD | IND | W => a = mem.readInt(u32, packet[i.k + x ..][0..@sizeOf(u32)], byte_order),
- LD | IND | H => a = mem.readInt(u16, packet[i.k + x ..][0..@sizeOf(u16)], byte_order),
- LD | IND | B => a = packet[i.k + x],
-
- LDX | IMM => x = i.k,
- LDX | MEM => x = m[i.k],
- LDX | LEN | W => x = len,
- LDX | MSH | B => x = @as(u32, @truncate(u4, packet[i.k])) << 2,
-
- ST => m[i.k] = a,
- STX => m[i.k] = x,
-
- ALU | ADD | K => a +%= i.k,
- ALU | SUB | K => a -%= i.k,
- ALU | MUL | K => a *%= i.k,
- ALU | DIV | K => a = try math.divTrunc(u32, a, i.k),
- ALU | OR | K => a |= i.k,
- ALU | AND | K => a &= i.k,
- ALU | LSH | K => a = math.shl(u32, a, i.k),
- ALU | RSH | K => a = math.shr(u32, a, i.k),
- ALU | MOD | K => a = try math.mod(u32, a, i.k),
- ALU | XOR | K => a ^= i.k,
- ALU | ADD | X => a +%= x,
- ALU | SUB | X => a -%= x,
- ALU | MUL | X => a *%= x,
- ALU | DIV | X => a = try math.divTrunc(u32, a, x),
- ALU | OR | X => a |= x,
- ALU | AND | X => a &= x,
- ALU | LSH | X => a = math.shl(u32, a, x),
- ALU | RSH | X => a = math.shr(u32, a, x),
- ALU | MOD | X => a = try math.mod(u32, a, x),
- ALU | XOR | X => a ^= x,
- ALU | NEG => a = @bitCast(u32, -%@bitCast(i32, a)),
-
- JMP | JA => pc += i.k,
- JMP | JEQ | K => pc += if (a == i.k) i.jt else i.jf,
- JMP | JGT | K => pc += if (a > i.k) i.jt else i.jf,
- JMP | JGE | K => pc += if (a >= i.k) i.jt else i.jf,
- JMP | JSET | K => pc += if (a & i.k > 0) i.jt else i.jf,
- JMP | JEQ | X => pc += if (a == x) i.jt else i.jf,
- JMP | JGT | X => pc += if (a > x) i.jt else i.jf,
- JMP | JGE | X => pc += if (a >= x) i.jt else i.jf,
- JMP | JSET | X => pc += if (a & x > 0) i.jt else i.jf,
-
- RET | K => return i.k,
- RET | A => return a,
-
- MISC | TAX => x = a,
- MISC | TXA => a = x,
- else => return error.InvalidOpcode,
- }
- }
-
- return error.NoReturn;
-}
-
-// This program is the BPF form of the tcpdump filter:
-//
-// tcpdump -dd 'ip host mirror.internode.on.net and tcp port ftp-data'
-//
-// As of January 2022, mirror.internode.on.net resolves to 150.101.135.3
-//
-// For reference, here's what it looks like in BPF assembler.
-// Note that the jumps are used for TCP/IP layer checks.
-//
-// ```
-// ldh [12] (#proto)
-// jeq #0x0800 (ETHERTYPE_IP), L1, fail
-// L1: ld [26]
-// jeq #150.101.135.3, L2, dest
-// dest: ld [30]
-// jeq #150.101.135.3, L2, fail
-// L2: ldb [23]
-// jeq #0x6 (IPPROTO_TCP), L3, fail
-// L3: ldh [20]
-// jset #0x1fff, fail, plen
-// plen: ldx 4 * ([14] & 0xf)
-// ldh [x + 14]
-// jeq #0x14 (FTP), pass, dstp
-// dstp: ldh [x + 16]
-// jeq #0x14 (FTP), pass, fail
-// pass: ret #0x40000
-// fail: ret #0
-// ```
-const tcpdump_filter = [_]Insn{
- Insn.ld_abs(.half_word, 12),
- Insn.jmp(.jeq, .{ .k = 0x800 }, 0, 14),
- Insn.ld_abs(.word, 26),
- Insn.jmp(.jeq, .{ .k = 0x96658703 }, 2, 0),
- Insn.ld_abs(.word, 30),
- Insn.jmp(.jeq, .{ .k = 0x96658703 }, 0, 10),
- Insn.ld_abs(.byte, 23),
- Insn.jmp(.jeq, .{ .k = 0x6 }, 0, 8),
- Insn.ld_abs(.half_word, 20),
- Insn.jmp(.jset, .{ .k = 0x1fff }, 6, 0),
- Insn.ldx_msh(14),
- Insn.ld_ind(.half_word, 14),
- Insn.jmp(.jeq, .{ .k = 0x14 }, 2, 0),
- Insn.ld_ind(.half_word, 16),
- Insn.jmp(.jeq, .{ .k = 0x14 }, 0, 1),
- Insn.ret(.{ .k = 0x40000 }),
- Insn.ret(.{ .k = 0 }),
-};
-
-// This packet is the output of `ls` on mirror.internode.on.net:/, captured
-// using the filter above.
-//
-// zig fmt: off
-const ftp_data = [_]u8{
- // ethernet - 14 bytes: IPv4(0x0800) from a4:71:74:ad:4b:f0 -> de:ad:be:ef:f0:0f
- 0xde, 0xad, 0xbe, 0xef, 0xf0, 0x0f, 0xa4, 0x71, 0x74, 0xad, 0x4b, 0xf0, 0x08, 0x00,
- // IPv4 - 20 bytes: TCP data from 150.101.135.3 -> 192.168.1.3
- 0x45, 0x00, 0x01, 0xf2, 0x70, 0x3b, 0x40, 0x00, 0x37, 0x06, 0xf2, 0xb6,
- 0x96, 0x65, 0x87, 0x03, 0xc0, 0xa8, 0x01, 0x03,
- // TCP - 32 bytes: Source port: 20 (FTP). Payload = 446 bytes
- 0x00, 0x14, 0x80, 0x6d, 0x35, 0x81, 0x2d, 0x40, 0x4f, 0x8a, 0x29, 0x9e, 0x80, 0x18, 0x00, 0x2e,
- 0x88, 0x8d, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x0b, 0x59, 0x5d, 0x09, 0x32, 0x8b, 0x51, 0xa0
-} ++
- // Raw line-based FTP data - 446 bytes
- "lrwxrwxrwx 1 root root 12 Feb 14 2012 debian -> .pub2/debian\r\n" ++
- "lrwxrwxrwx 1 root root 15 Feb 14 2012 debian-cd -> .pub2/debian-cd\r\n" ++
- "lrwxrwxrwx 1 root root 9 Mar 9 2018 linux -> pub/linux\r\n" ++
- "drwxr-xr-X 3 mirror mirror 4096 Sep 20 08:10 pub\r\n" ++
- "lrwxrwxrwx 1 root root 12 Feb 14 2012 ubuntu -> .pub2/ubuntu\r\n" ++
- "-rw-r--r-- 1 root root 1044 Jan 20 2015 welcome.msg\r\n";
-// zig fmt: on
-
-test "tcpdump filter" {
- try expectEqual(
- @as(u32, 0x40000),
- try simulate(ftp_data, &tcpdump_filter, .Big),
- );
-}
-
-fn expectPass(data: anytype, filter: []const Insn) !void {
- try expectEqual(
- @as(u32, 0),
- try simulate(mem.asBytes(data), filter, .Big),
- );
-}
-
-fn expectFail(expected_error: anyerror, data: anytype, filter: []const Insn) !void {
- try expectError(
- expected_error,
- simulate(mem.asBytes(data), filter, native_endian),
- );
-}
-
-test "simulator coverage" {
- const some_data = [_]u8{
- 0xaa, 0xbb, 0xcc, 0xdd, 0x7f,
- };
-
- try expectPass(&some_data, &.{
- // ld #10
- // ldx #1
- // st M[0]
- // stx M[1]
- // fail if A != 10
- Insn.ld_imm(10),
- Insn.ldx_imm(1),
- Insn.st(.m0),
- Insn.stx(.m1),
- Insn.jmp(.jeq, .{ .k = 10 }, 1, 0),
- Insn.ret(.{ .k = 1 }),
- // ld [0]
- // fail if A != 0xaabbccdd
- Insn.ld_abs(.word, 0),
- Insn.jmp(.jeq, .{ .k = 0xaabbccdd }, 1, 0),
- Insn.ret(.{ .k = 2 }),
- // ldh [0]
- // fail if A != 0xaabb
- Insn.ld_abs(.half_word, 0),
- Insn.jmp(.jeq, .{ .k = 0xaabb }, 1, 0),
- Insn.ret(.{ .k = 3 }),
- // ldb [0]
- // fail if A != 0xaa
- Insn.ld_abs(.byte, 0),
- Insn.jmp(.jeq, .{ .k = 0xaa }, 1, 0),
- Insn.ret(.{ .k = 4 }),
- // ld [x + 0]
- // fail if A != 0xbbccdd7f
- Insn.ld_ind(.word, 0),
- Insn.jmp(.jeq, .{ .k = 0xbbccdd7f }, 1, 0),
- Insn.ret(.{ .k = 5 }),
- // ldh [x + 0]
- // fail if A != 0xbbcc
- Insn.ld_ind(.half_word, 0),
- Insn.jmp(.jeq, .{ .k = 0xbbcc }, 1, 0),
- Insn.ret(.{ .k = 6 }),
- // ldb [x + 0]
- // fail if A != 0xbb
- Insn.ld_ind(.byte, 0),
- Insn.jmp(.jeq, .{ .k = 0xbb }, 1, 0),
- Insn.ret(.{ .k = 7 }),
- // ld M[0]
- // fail if A != 10
- Insn.ld_mem(.m0),
- Insn.jmp(.jeq, .{ .k = 10 }, 1, 0),
- Insn.ret(.{ .k = 8 }),
- // ld #len
- // fail if A != 5
- Insn.ld_len(),
- Insn.jmp(.jeq, .{ .k = some_data.len }, 1, 0),
- Insn.ret(.{ .k = 9 }),
- // ld #0
- // ld arc4random()
- // fail if A == 0
- Insn.ld_imm(0),
- Insn.ld_rnd(),
- Insn.jmp(.jgt, .{ .k = 0 }, 1, 0),
- Insn.ret(.{ .k = 10 }),
- // ld #3
- // ldx #10
- // st M[2]
- // txa
- // fail if a != x
- Insn.ld_imm(3),
- Insn.ldx_imm(10),
- Insn.st(.m2),
- Insn.txa(),
- Insn.jmp(.jeq, .x, 1, 0),
- Insn.ret(.{ .k = 11 }),
- // ldx M[2]
- // fail if A <= X
- Insn.ldx_mem(.m2),
- Insn.jmp(.jgt, .x, 1, 0),
- Insn.ret(.{ .k = 12 }),
- // ldx #len
- // fail if a <= x
- Insn.ldx_len(),
- Insn.jmp(.jgt, .x, 1, 0),
- Insn.ret(.{ .k = 13 }),
- // a = 4 * (0x7f & 0xf)
- // x = 4 * ([4] & 0xf)
- // fail if a != x
- Insn.ld_imm(4 * (0x7f & 0xf)),
- Insn.ldx_msh(4),
- Insn.jmp(.jeq, .x, 1, 0),
- Insn.ret(.{ .k = 14 }),
- // ld #(u32)-1
- // ldx #2
- // add #1
- // fail if a != 0
- Insn.ld_imm(0xffffffff),
- Insn.ldx_imm(2),
- Insn.alu(.add, .{ .k = 1 }),
- Insn.jmp(.jeq, .{ .k = 0 }, 1, 0),
- Insn.ret(.{ .k = 15 }),
- // sub #1
- // fail if a != (u32)-1
- Insn.alu(.sub, .{ .k = 1 }),
- Insn.jmp(.jeq, .{ .k = 0xffffffff }, 1, 0),
- Insn.ret(.{ .k = 16 }),
- // add x
- // fail if a != 1
- Insn.alu(.add, .x),
- Insn.jmp(.jeq, .{ .k = 1 }, 1, 0),
- Insn.ret(.{ .k = 17 }),
- // sub x
- // fail if a != (u32)-1
- Insn.alu(.sub, .x),
- Insn.jmp(.jeq, .{ .k = 0xffffffff }, 1, 0),
- Insn.ret(.{ .k = 18 }),
- // ld #16
- // mul #2
- // fail if a != 32
- Insn.ld_imm(16),
- Insn.alu(.mul, .{ .k = 2 }),
- Insn.jmp(.jeq, .{ .k = 32 }, 1, 0),
- Insn.ret(.{ .k = 19 }),
- // mul x
- // fail if a != 64
- Insn.alu(.mul, .x),
- Insn.jmp(.jeq, .{ .k = 64 }, 1, 0),
- Insn.ret(.{ .k = 20 }),
- // div #2
- // fail if a != 32
- Insn.alu(.div, .{ .k = 2 }),
- Insn.jmp(.jeq, .{ .k = 32 }, 1, 0),
- Insn.ret(.{ .k = 21 }),
- // div x
- // fail if a != 16
- Insn.alu(.div, .x),
- Insn.jmp(.jeq, .{ .k = 16 }, 1, 0),
- Insn.ret(.{ .k = 22 }),
- // or #4
- // fail if a != 20
- Insn.alu(.@"or", .{ .k = 4 }),
- Insn.jmp(.jeq, .{ .k = 20 }, 1, 0),
- Insn.ret(.{ .k = 23 }),
- // or x
- // fail if a != 22
- Insn.alu(.@"or", .x),
- Insn.jmp(.jeq, .{ .k = 22 }, 1, 0),
- Insn.ret(.{ .k = 24 }),
- // and #6
- // fail if a != 6
- Insn.alu(.@"and", .{ .k = 0b110 }),
- Insn.jmp(.jeq, .{ .k = 6 }, 1, 0),
- Insn.ret(.{ .k = 25 }),
- // and x
- // fail if a != 2
- Insn.alu(.@"and", .x),
- Insn.jmp(.jeq, .x, 1, 0),
- Insn.ret(.{ .k = 26 }),
- // xor #15
- // fail if a != 13
- Insn.alu(.xor, .{ .k = 0b1111 }),
- Insn.jmp(.jeq, .{ .k = 0b1101 }, 1, 0),
- Insn.ret(.{ .k = 27 }),
- // xor x
- // fail if a != 15
- Insn.alu(.xor, .x),
- Insn.jmp(.jeq, .{ .k = 0b1111 }, 1, 0),
- Insn.ret(.{ .k = 28 }),
- // rsh #1
- // fail if a != 7
- Insn.alu(.rsh, .{ .k = 1 }),
- Insn.jmp(.jeq, .{ .k = 0b0111 }, 1, 0),
- Insn.ret(.{ .k = 29 }),
- // rsh x
- // fail if a != 1
- Insn.alu(.rsh, .x),
- Insn.jmp(.jeq, .{ .k = 0b0001 }, 1, 0),
- Insn.ret(.{ .k = 30 }),
- // lsh #1
- // fail if a != 2
- Insn.alu(.lsh, .{ .k = 1 }),
- Insn.jmp(.jeq, .{ .k = 0b0010 }, 1, 0),
- Insn.ret(.{ .k = 31 }),
- // lsh x
- // fail if a != 8
- Insn.alu(.lsh, .x),
- Insn.jmp(.jeq, .{ .k = 0b1000 }, 1, 0),
- Insn.ret(.{ .k = 32 }),
- // mod 6
- // fail if a != 2
- Insn.alu(.mod, .{ .k = 6 }),
- Insn.jmp(.jeq, .{ .k = 2 }, 1, 0),
- Insn.ret(.{ .k = 33 }),
- // mod x
- // fail if a != 0
- Insn.alu(.mod, .x),
- Insn.jmp(.jeq, .{ .k = 0 }, 1, 0),
- Insn.ret(.{ .k = 34 }),
- // tax
- // neg
- // fail if a != (u32)-2
- Insn.txa(),
- Insn.alu_neg(),
- Insn.jmp(.jeq, .{ .k = ~@as(u32, 2) + 1 }, 1, 0),
- Insn.ret(.{ .k = 35 }),
- // ja #1 (skip the next instruction)
- Insn.jmp_ja(1),
- Insn.ret(.{ .k = 36 }),
- // ld #20
- // tax
- // fail if a != 20
- // fail if a != x
- Insn.ld_imm(20),
- Insn.tax(),
- Insn.jmp(.jeq, .{ .k = 20 }, 1, 0),
- Insn.ret(.{ .k = 37 }),
- Insn.jmp(.jeq, .x, 1, 0),
- Insn.ret(.{ .k = 38 }),
- // ld #19
- // fail if a == 20
- // fail if a == x
- // fail if a >= 20
- // fail if a >= X
- Insn.ld_imm(19),
- Insn.jmp(.jeq, .{ .k = 20 }, 0, 1),
- Insn.ret(.{ .k = 39 }),
- Insn.jmp(.jeq, .x, 0, 1),
- Insn.ret(.{ .k = 40 }),
- Insn.jmp(.jgt, .{ .k = 20 }, 0, 1),
- Insn.ret(.{ .k = 41 }),
- Insn.jmp(.jgt, .x, 0, 1),
- Insn.ret(.{ .k = 42 }),
- // ld #21
- // fail if a < 20
- // fail if a < x
- Insn.ld_imm(21),
- Insn.jmp(.jgt, .{ .k = 20 }, 1, 0),
- Insn.ret(.{ .k = 43 }),
- Insn.jmp(.jgt, .x, 1, 0),
- Insn.ret(.{ .k = 44 }),
- // ldx #22
- // fail if a < 22
- // fail if a < x
- Insn.ldx_imm(22),
- Insn.jmp(.jge, .{ .k = 22 }, 0, 1),
- Insn.ret(.{ .k = 45 }),
- Insn.jmp(.jge, .x, 0, 1),
- Insn.ret(.{ .k = 46 }),
- // ld #23
- // fail if a >= 22
- // fail if a >= x
- Insn.ld_imm(23),
- Insn.jmp(.jge, .{ .k = 22 }, 1, 0),
- Insn.ret(.{ .k = 47 }),
- Insn.jmp(.jge, .x, 1, 0),
- Insn.ret(.{ .k = 48 }),
- // ldx #0b10100
- // fail if a & 0b10100 == 0
- // fail if a & x == 0
- Insn.ldx_imm(0b10100),
- Insn.jmp(.jset, .{ .k = 0b10100 }, 1, 0),
- Insn.ret(.{ .k = 47 }),
- Insn.jmp(.jset, .x, 1, 0),
- Insn.ret(.{ .k = 48 }),
- // ldx #0
- // fail if a & 0 > 0
- // fail if a & x > 0
- Insn.ldx_imm(0),
- Insn.jmp(.jset, .{ .k = 0 }, 0, 1),
- Insn.ret(.{ .k = 49 }),
- Insn.jmp(.jset, .x, 0, 1),
- Insn.ret(.{ .k = 50 }),
- Insn.ret(.{ .k = 0 }),
- });
- try expectPass(&some_data, &.{
- Insn.ld_imm(35),
- Insn.ld_imm(0),
- Insn.ret(.a),
- });
-
- // Errors
- try expectFail(error.NoReturn, &some_data, &.{
- Insn.ld_imm(10),
- });
- try expectFail(error.InvalidOpcode, &some_data, &.{
- Insn.stmt(0x7f, 0xdeadbeef),
- });
- try expectFail(error.InvalidOffset, &some_data, &.{
- Insn.stmt(LD | ABS | W, 10),
- });
- try expectFail(error.InvalidLocation, &some_data, &.{
- Insn.jmp(.jeq, .{ .k = 0 }, 10, 0),
- });
- try expectFail(error.InvalidLocation, &some_data, &.{
- Insn.jmp(.jeq, .{ .k = 0 }, 0, 10),
- });
-}
diff --git a/lib/std/x/net/ip.zig b/lib/std/x/net/ip.zig
deleted file mode 100644
index b3da9725d8..0000000000
--- a/lib/std/x/net/ip.zig
+++ /dev/null
@@ -1,57 +0,0 @@
-const std = @import("../../std.zig");
-
-const fmt = std.fmt;
-
-const IPv4 = std.x.os.IPv4;
-const IPv6 = std.x.os.IPv6;
-const Socket = std.x.os.Socket;
-
-/// A generic IP abstraction.
-const ip = @This();
-
-/// A union of all eligible types of IP addresses.
-pub const Address = union(enum) {
- ipv4: IPv4.Address,
- ipv6: IPv6.Address,
-
- /// Instantiate a new address with a IPv4 host and port.
- pub fn initIPv4(host: IPv4, port: u16) Address {
- return .{ .ipv4 = .{ .host = host, .port = port } };
- }
-
- /// Instantiate a new address with a IPv6 host and port.
- pub fn initIPv6(host: IPv6, port: u16) Address {
- return .{ .ipv6 = .{ .host = host, .port = port } };
- }
-
- /// Re-interpret a generic socket address into an IP address.
- pub fn from(address: Socket.Address) ip.Address {
- return switch (address) {
- .ipv4 => |ipv4_address| .{ .ipv4 = ipv4_address },
- .ipv6 => |ipv6_address| .{ .ipv6 = ipv6_address },
- };
- }
-
- /// Re-interpret an IP address into a generic socket address.
- pub fn into(self: ip.Address) Socket.Address {
- return switch (self) {
- .ipv4 => |ipv4_address| .{ .ipv4 = ipv4_address },
- .ipv6 => |ipv6_address| .{ .ipv6 = ipv6_address },
- };
- }
-
- /// Implements the `std.fmt.format` API.
- pub fn format(
- self: ip.Address,
- comptime layout: []const u8,
- opts: fmt.FormatOptions,
- writer: anytype,
- ) !void {
- if (layout.len != 0) std.fmt.invalidFmtError(layout, self);
- _ = opts;
- switch (self) {
- .ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
- .ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
- }
- }
-};
diff --git a/lib/std/x/net/tcp.zig b/lib/std/x/net/tcp.zig
deleted file mode 100644
index 0293deb9db..0000000000
--- a/lib/std/x/net/tcp.zig
+++ /dev/null
@@ -1,447 +0,0 @@
-const std = @import("../../std.zig");
-const builtin = @import("builtin");
-
-const io = std.io;
-const os = std.os;
-const ip = std.x.net.ip;
-
-const fmt = std.fmt;
-const mem = std.mem;
-const testing = std.testing;
-const native_os = builtin.os;
-
-const IPv4 = std.x.os.IPv4;
-const IPv6 = std.x.os.IPv6;
-const Socket = std.x.os.Socket;
-const Buffer = std.x.os.Buffer;
-
-/// A generic TCP socket abstraction.
-const tcp = @This();
-
-/// A TCP client-address pair.
-pub const Connection = struct {
- client: tcp.Client,
- address: ip.Address,
-
- /// Enclose a TCP client and address into a client-address pair.
- pub fn from(conn: Socket.Connection) tcp.Connection {
- return .{
- .client = tcp.Client.from(conn.socket),
- .address = ip.Address.from(conn.address),
- };
- }
-
- /// Unravel a TCP client-address pair into a socket-address pair.
- pub fn into(self: tcp.Connection) Socket.Connection {
- return .{
- .socket = self.client.socket,
- .address = self.address.into(),
- };
- }
-
- /// Closes the underlying client of the connection.
- pub fn deinit(self: tcp.Connection) void {
- self.client.deinit();
- }
-};
-
-/// Possible domains that a TCP client/listener may operate over.
-pub const Domain = enum(u16) {
- ip = os.AF.INET,
- ipv6 = os.AF.INET6,
-};
-
-/// A TCP client.
-pub const Client = struct {
- socket: Socket,
-
- /// Implements `std.io.Reader`.
- pub const Reader = struct {
- client: Client,
- flags: u32,
-
- /// Implements `readFn` for `std.io.Reader`.
- pub fn read(self: Client.Reader, buffer: []u8) !usize {
- return self.client.read(buffer, self.flags);
- }
- };
-
- /// Implements `std.io.Writer`.
- pub const Writer = struct {
- client: Client,
- flags: u32,
-
- /// Implements `writeFn` for `std.io.Writer`.
- pub fn write(self: Client.Writer, buffer: []const u8) !usize {
- return self.client.write(buffer, self.flags);
- }
- };
-
- /// Opens a new client.
- pub fn init(domain: tcp.Domain, flags: std.enums.EnumFieldStruct(Socket.InitFlags, bool, false)) !Client {
- return Client{
- .socket = try Socket.init(
- @enumToInt(domain),
- os.SOCK.STREAM,
- os.IPPROTO.TCP,
- flags,
- ),
- };
- }
-
- /// Enclose a TCP client over an existing socket.
- pub fn from(socket: Socket) Client {
- return Client{ .socket = socket };
- }
-
- /// Closes the client.
- pub fn deinit(self: Client) void {
- self.socket.deinit();
- }
-
- /// Shutdown either the read side, write side, or all sides of the client's underlying socket.
- pub fn shutdown(self: Client, how: os.ShutdownHow) !void {
- return self.socket.shutdown(how);
- }
-
- /// Have the client attempt to the connect to an address.
- pub fn connect(self: Client, address: ip.Address) !void {
- return self.socket.connect(address.into());
- }
-
- /// Extracts the error set of a function.
- /// TODO: remove after Socket.{read, write} error unions are well-defined across different platforms
- fn ErrorSetOf(comptime Function: anytype) type {
- return @typeInfo(@typeInfo(@TypeOf(Function)).Fn.return_type.?).ErrorUnion.error_set;
- }
-
- /// Wrap `tcp.Client` into `std.io.Reader`.
- pub fn reader(self: Client, flags: u32) io.Reader(Client.Reader, ErrorSetOf(Client.Reader.read), Client.Reader.read) {
- return .{ .context = .{ .client = self, .flags = flags } };
- }
-
- /// Wrap `tcp.Client` into `std.io.Writer`.
- pub fn writer(self: Client, flags: u32) io.Writer(Client.Writer, ErrorSetOf(Client.Writer.write), Client.Writer.write) {
- return .{ .context = .{ .client = self, .flags = flags } };
- }
-
- /// Read data from the socket into the buffer provided with a set of flags
- /// specified. It returns the number of bytes read into the buffer provided.
- pub fn read(self: Client, buf: []u8, flags: u32) !usize {
- return self.socket.read(buf, flags);
- }
-
- /// Write a buffer of data provided to the socket with a set of flags specified.
- /// It returns the number of bytes that are written to the socket.
- pub fn write(self: Client, buf: []const u8, flags: u32) !usize {
- return self.socket.write(buf, flags);
- }
-
- /// Writes multiple I/O vectors with a prepended message header to the socket
- /// with a set of flags specified. It returns the number of bytes that are
- /// written to the socket.
- pub fn writeMessage(self: Client, msg: Socket.Message, flags: u32) !usize {
- return self.socket.writeMessage(msg, flags);
- }
-
- /// Read multiple I/O vectors with a prepended message header from the socket
- /// with a set of flags specified. It returns the number of bytes that were
- /// read into the buffer provided.
- pub fn readMessage(self: Client, msg: *Socket.Message, flags: u32) !usize {
- return self.socket.readMessage(msg, flags);
- }
-
- /// Query and return the latest cached error on the client's underlying socket.
- pub fn getError(self: Client) !void {
- return self.socket.getError();
- }
-
- /// Query the read buffer size of the client's underlying socket.
- pub fn getReadBufferSize(self: Client) !u32 {
- return self.socket.getReadBufferSize();
- }
-
- /// Query the write buffer size of the client's underlying socket.
- pub fn getWriteBufferSize(self: Client) !u32 {
- return self.socket.getWriteBufferSize();
- }
-
- /// Query the address that the client's socket is locally bounded to.
- pub fn getLocalAddress(self: Client) !ip.Address {
- return ip.Address.from(try self.socket.getLocalAddress());
- }
-
- /// Query the address that the socket is connected to.
- pub fn getRemoteAddress(self: Client) !ip.Address {
- return ip.Address.from(try self.socket.getRemoteAddress());
- }
-
- /// Have close() or shutdown() syscalls block until all queued messages in the client have been successfully
- /// sent, or if the timeout specified in seconds has been reached. It returns `error.UnsupportedSocketOption`
- /// if the host does not support the option for a socket to linger around up until a timeout specified in
- /// seconds.
- pub fn setLinger(self: Client, timeout_seconds: ?u16) !void {
- return self.socket.setLinger(timeout_seconds);
- }
-
- /// Have keep-alive messages be sent periodically. The timing in which keep-alive messages are sent are
- /// dependant on operating system settings. It returns `error.UnsupportedSocketOption` if the host does
- /// not support periodically sending keep-alive messages on connection-oriented sockets.
- pub fn setKeepAlive(self: Client, enabled: bool) !void {
- return self.socket.setKeepAlive(enabled);
- }
-
- /// Disable Nagle's algorithm on a TCP socket. It returns `error.UnsupportedSocketOption` if
- /// the host does not support sockets disabling Nagle's algorithm.
- pub fn setNoDelay(self: Client, enabled: bool) !void {
- if (@hasDecl(os.TCP, "NODELAY")) {
- const bytes = mem.asBytes(&@as(usize, @boolToInt(enabled)));
- return self.socket.setOption(os.IPPROTO.TCP, os.TCP.NODELAY, bytes);
- }
- return error.UnsupportedSocketOption;
- }
-
- /// Enables TCP Quick ACK on a TCP socket to immediately send rather than delay ACKs when necessary. It returns
- /// `error.UnsupportedSocketOption` if the host does not support TCP Quick ACK.
- pub fn setQuickACK(self: Client, enabled: bool) !void {
- if (@hasDecl(os.TCP, "QUICKACK")) {
- return self.socket.setOption(os.IPPROTO.TCP, os.TCP.QUICKACK, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- }
- return error.UnsupportedSocketOption;
- }
-
- /// Set the write buffer size of the socket.
- pub fn setWriteBufferSize(self: Client, size: u32) !void {
- return self.socket.setWriteBufferSize(size);
- }
-
- /// Set the read buffer size of the socket.
- pub fn setReadBufferSize(self: Client, size: u32) !void {
- return self.socket.setReadBufferSize(size);
- }
-
- /// Set a timeout on the socket that is to occur if no messages are successfully written
- /// to its bound destination after a specified number of milliseconds. A subsequent write
- /// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
- pub fn setWriteTimeout(self: Client, milliseconds: u32) !void {
- return self.socket.setWriteTimeout(milliseconds);
- }
-
- /// Set a timeout on the socket that is to occur if no messages are successfully read
- /// from its bound destination after a specified number of milliseconds. A subsequent
- /// read from the socket will thereafter return `error.WouldBlock` should the timeout be
- /// exceeded.
- pub fn setReadTimeout(self: Client, milliseconds: u32) !void {
- return self.socket.setReadTimeout(milliseconds);
- }
-};
-
-/// A TCP listener.
-pub const Listener = struct {
- socket: Socket,
-
- /// Opens a new listener.
- pub fn init(domain: tcp.Domain, flags: std.enums.EnumFieldStruct(Socket.InitFlags, bool, false)) !Listener {
- return Listener{
- .socket = try Socket.init(
- @enumToInt(domain),
- os.SOCK.STREAM,
- os.IPPROTO.TCP,
- flags,
- ),
- };
- }
-
- /// Closes the listener.
- pub fn deinit(self: Listener) void {
- self.socket.deinit();
- }
-
- /// Shuts down the underlying listener's socket. The next subsequent call, or
- /// a current pending call to accept() after shutdown is called will return
- /// an error.
- pub fn shutdown(self: Listener) !void {
- return self.socket.shutdown(.recv);
- }
-
- /// Binds the listener's socket to an address.
- pub fn bind(self: Listener, address: ip.Address) !void {
- return self.socket.bind(address.into());
- }
-
- /// Start listening for incoming connections.
- pub fn listen(self: Listener, max_backlog_size: u31) !void {
- return self.socket.listen(max_backlog_size);
- }
-
- /// Accept a pending incoming connection queued to the kernel backlog
- /// of the listener's socket.
- pub fn accept(self: Listener, flags: std.enums.EnumFieldStruct(Socket.InitFlags, bool, false)) !tcp.Connection {
- return tcp.Connection.from(try self.socket.accept(flags));
- }
-
- /// Query and return the latest cached error on the listener's underlying socket.
- pub fn getError(self: Client) !void {
- return self.socket.getError();
- }
-
- /// Query the address that the listener's socket is locally bounded to.
- pub fn getLocalAddress(self: Listener) !ip.Address {
- return ip.Address.from(try self.socket.getLocalAddress());
- }
-
- /// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
- /// the host does not support sockets listening the same address.
- pub fn setReuseAddress(self: Listener, enabled: bool) !void {
- return self.socket.setReuseAddress(enabled);
- }
-
- /// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
- /// the host does not supports sockets listening on the same port.
- pub fn setReusePort(self: Listener, enabled: bool) !void {
- return self.socket.setReusePort(enabled);
- }
-
- /// Enables TCP Fast Open (RFC 7413) on a TCP socket. It returns `error.UnsupportedSocketOption` if the host does not
- /// support TCP Fast Open.
- pub fn setFastOpen(self: Listener, enabled: bool) !void {
- if (@hasDecl(os.TCP, "FASTOPEN")) {
- return self.socket.setOption(os.IPPROTO.TCP, os.TCP.FASTOPEN, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- }
- return error.UnsupportedSocketOption;
- }
-
- /// Set a timeout on the listener that is to occur if no new incoming connections come in
- /// after a specified number of milliseconds. A subsequent accept call to the listener
- /// will thereafter return `error.WouldBlock` should the timeout be exceeded.
- pub fn setAcceptTimeout(self: Listener, milliseconds: usize) !void {
- return self.socket.setReadTimeout(milliseconds);
- }
-};
-
-test "tcp: create client/listener pair" {
- if (native_os.tag == .wasi) return error.SkipZigTest;
-
- const listener = try tcp.Listener.init(.ip, .{ .close_on_exec = true });
- defer listener.deinit();
-
- try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
- try listener.listen(128);
-
- var binded_address = try listener.getLocalAddress();
- switch (binded_address) {
- .ipv4 => |*ipv4| ipv4.host = IPv4.localhost,
- .ipv6 => |*ipv6| ipv6.host = IPv6.localhost,
- }
-
- const client = try tcp.Client.init(.ip, .{ .close_on_exec = true });
- defer client.deinit();
-
- try client.connect(binded_address);
-
- const conn = try listener.accept(.{ .close_on_exec = true });
- defer conn.deinit();
-}
-
-test "tcp/client: 1ms read timeout" {
- if (native_os.tag == .wasi) return error.SkipZigTest;
-
- const listener = try tcp.Listener.init(.ip, .{ .close_on_exec = true });
- defer listener.deinit();
-
- try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
- try listener.listen(128);
-
- var binded_address = try listener.getLocalAddress();
- switch (binded_address) {
- .ipv4 => |*ipv4| ipv4.host = IPv4.localhost,
- .ipv6 => |*ipv6| ipv6.host = IPv6.localhost,
- }
-
- const client = try tcp.Client.init(.ip, .{ .close_on_exec = true });
- defer client.deinit();
-
- try client.connect(binded_address);
- try client.setReadTimeout(1);
-
- const conn = try listener.accept(.{ .close_on_exec = true });
- defer conn.deinit();
-
- var buf: [1]u8 = undefined;
- try testing.expectError(error.WouldBlock, client.reader(0).read(&buf));
-}
-
-test "tcp/client: read and write multiple vectors" {
- if (native_os.tag == .wasi) return error.SkipZigTest;
-
- if (builtin.os.tag == .windows) {
- // https://github.com/ziglang/zig/issues/13893
- return error.SkipZigTest;
- }
-
- const listener = try tcp.Listener.init(.ip, .{ .close_on_exec = true });
- defer listener.deinit();
-
- try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
- try listener.listen(128);
-
- var binded_address = try listener.getLocalAddress();
- switch (binded_address) {
- .ipv4 => |*ipv4| ipv4.host = IPv4.localhost,
- .ipv6 => |*ipv6| ipv6.host = IPv6.localhost,
- }
-
- const client = try tcp.Client.init(.ip, .{ .close_on_exec = true });
- defer client.deinit();
-
- try client.connect(binded_address);
-
- const conn = try listener.accept(.{ .close_on_exec = true });
- defer conn.deinit();
-
- const message = "hello world";
- _ = try conn.client.writeMessage(Socket.Message.fromBuffers(&[_]Buffer{
- Buffer.from(message[0 .. message.len / 2]),
- Buffer.from(message[message.len / 2 ..]),
- }), 0);
-
- var buf: [message.len + 1]u8 = undefined;
- var msg = Socket.Message.fromBuffers(&[_]Buffer{
- Buffer.from(buf[0 .. message.len / 2]),
- Buffer.from(buf[message.len / 2 ..]),
- });
- _ = try client.readMessage(&msg, 0);
-
- try testing.expectEqualStrings(message, buf[0..message.len]);
-}
-
-test "tcp/listener: bind to unspecified ipv4 address" {
- if (native_os.tag == .wasi) return error.SkipZigTest;
-
- const listener = try tcp.Listener.init(.ip, .{ .close_on_exec = true });
- defer listener.deinit();
-
- try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
- try listener.listen(128);
-
- const address = try listener.getLocalAddress();
- try testing.expect(address == .ipv4);
-}
-
-test "tcp/listener: bind to unspecified ipv6 address" {
- if (native_os.tag == .wasi) return error.SkipZigTest;
-
- if (builtin.os.tag == .windows) {
- // https://github.com/ziglang/zig/issues/13893
- return error.SkipZigTest;
- }
-
- const listener = try tcp.Listener.init(.ipv6, .{ .close_on_exec = true });
- defer listener.deinit();
-
- try listener.bind(ip.Address.initIPv6(IPv6.unspecified, 0));
- try listener.listen(128);
-
- const address = try listener.getLocalAddress();
- try testing.expect(address == .ipv6);
-}
diff --git a/lib/std/x/os/io.zig b/lib/std/x/os/io.zig
deleted file mode 100644
index 6c4763df65..0000000000
--- a/lib/std/x/os/io.zig
+++ /dev/null
@@ -1,224 +0,0 @@
-const std = @import("../../std.zig");
-const builtin = @import("builtin");
-
-const os = std.os;
-const mem = std.mem;
-const testing = std.testing;
-const native_os = builtin.os;
-const linux = std.os.linux;
-
-/// POSIX `iovec`, or Windows `WSABUF`. The difference between the two are the ordering
-/// of fields, alongside the length being represented as either a ULONG or a size_t.
-pub const Buffer = if (native_os.tag == .windows)
- extern struct {
- len: c_ulong,
- ptr: usize,
-
- pub fn from(slice: []const u8) Buffer {
- return .{ .len = @intCast(c_ulong, slice.len), .ptr = @ptrToInt(slice.ptr) };
- }
-
- pub fn into(self: Buffer) []const u8 {
- return @intToPtr([*]const u8, self.ptr)[0..self.len];
- }
-
- pub fn intoMutable(self: Buffer) []u8 {
- return @intToPtr([*]u8, self.ptr)[0..self.len];
- }
- }
-else
- extern struct {
- ptr: usize,
- len: usize,
-
- pub fn from(slice: []const u8) Buffer {
- return .{ .ptr = @ptrToInt(slice.ptr), .len = slice.len };
- }
-
- pub fn into(self: Buffer) []const u8 {
- return @intToPtr([*]const u8, self.ptr)[0..self.len];
- }
-
- pub fn intoMutable(self: Buffer) []u8 {
- return @intToPtr([*]u8, self.ptr)[0..self.len];
- }
- };
-
-pub const Reactor = struct {
- pub const InitFlags = enum {
- close_on_exec,
- };
-
- pub const Event = struct {
- data: usize,
- is_error: bool,
- is_hup: bool,
- is_readable: bool,
- is_writable: bool,
- };
-
- pub const Interest = struct {
- hup: bool = false,
- oneshot: bool = false,
- readable: bool = false,
- writable: bool = false,
- };
-
- fd: os.fd_t,
-
- pub fn init(flags: std.enums.EnumFieldStruct(Reactor.InitFlags, bool, false)) !Reactor {
- var raw_flags: u32 = 0;
- const set = std.EnumSet(Reactor.InitFlags).init(flags);
- if (set.contains(.close_on_exec)) raw_flags |= linux.EPOLL.CLOEXEC;
- return Reactor{ .fd = try os.epoll_create1(raw_flags) };
- }
-
- pub fn deinit(self: Reactor) void {
- os.close(self.fd);
- }
-
- pub fn update(self: Reactor, fd: os.fd_t, identifier: usize, interest: Reactor.Interest) !void {
- var flags: u32 = 0;
- flags |= if (interest.oneshot) linux.EPOLL.ONESHOT else linux.EPOLL.ET;
- if (interest.hup) flags |= linux.EPOLL.RDHUP;
- if (interest.readable) flags |= linux.EPOLL.IN;
- if (interest.writable) flags |= linux.EPOLL.OUT;
-
- const event = &linux.epoll_event{
- .events = flags,
- .data = .{ .ptr = identifier },
- };
-
- os.epoll_ctl(self.fd, linux.EPOLL.CTL_MOD, fd, event) catch |err| switch (err) {
- error.FileDescriptorNotRegistered => try os.epoll_ctl(self.fd, linux.EPOLL.CTL_ADD, fd, event),
- else => return err,
- };
- }
-
- pub fn remove(self: Reactor, fd: os.fd_t) !void {
- // directly from man epoll_ctl BUGS section
- // In kernel versions before 2.6.9, the EPOLL_CTL_DEL operation re‐
- // quired a non-null pointer in event, even though this argument is
- // ignored. Since Linux 2.6.9, event can be specified as NULL when
- // using EPOLL_CTL_DEL. Applications that need to be portable to
- // kernels before 2.6.9 should specify a non-null pointer in event.
- var event = linux.epoll_event{
- .events = 0,
- .data = .{ .ptr = 0 },
- };
-
- return os.epoll_ctl(self.fd, linux.EPOLL.CTL_DEL, fd, &event);
- }
-
- pub fn poll(self: Reactor, comptime max_num_events: comptime_int, closure: anytype, timeout_milliseconds: ?u64) !void {
- var events: [max_num_events]linux.epoll_event = undefined;
-
- const num_events = os.epoll_wait(self.fd, &events, if (timeout_milliseconds) |ms| @intCast(i32, ms) else -1);
- for (events[0..num_events]) |ev| {
- const is_error = ev.events & linux.EPOLL.ERR != 0;
- const is_hup = ev.events & (linux.EPOLL.HUP | linux.EPOLL.RDHUP) != 0;
- const is_readable = ev.events & linux.EPOLL.IN != 0;
- const is_writable = ev.events & linux.EPOLL.OUT != 0;
-
- try closure.call(Reactor.Event{
- .data = ev.data.ptr,
- .is_error = is_error,
- .is_hup = is_hup,
- .is_readable = is_readable,
- .is_writable = is_writable,
- });
- }
- }
-};
-
-test "reactor/linux: drive async tcp client/listener pair" {
- if (native_os.tag != .linux) return error.SkipZigTest;
-
- const ip = std.x.net.ip;
- const tcp = std.x.net.tcp;
-
- const IPv4 = std.x.os.IPv4;
- const IPv6 = std.x.os.IPv6;
-
- const reactor = try Reactor.init(.{ .close_on_exec = true });
- defer reactor.deinit();
-
- const listener = try tcp.Listener.init(.ip, .{
- .close_on_exec = true,
- .nonblocking = true,
- });
- defer listener.deinit();
-
- try reactor.update(listener.socket.fd, 0, .{ .readable = true });
- try reactor.poll(1, struct {
- fn call(event: Reactor.Event) !void {
- try testing.expectEqual(Reactor.Event{
- .data = 0,
- .is_error = false,
- .is_hup = true,
- .is_readable = false,
- .is_writable = false,
- }, event);
- }
- }, null);
-
- try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
- try listener.listen(128);
-
- var binded_address = try listener.getLocalAddress();
- switch (binded_address) {
- .ipv4 => |*ipv4| ipv4.host = IPv4.localhost,
- .ipv6 => |*ipv6| ipv6.host = IPv6.localhost,
- }
-
- const client = try tcp.Client.init(.ip, .{
- .close_on_exec = true,
- .nonblocking = true,
- });
- defer client.deinit();
-
- try reactor.update(client.socket.fd, 1, .{ .readable = true, .writable = true });
- try reactor.poll(1, struct {
- fn call(event: Reactor.Event) !void {
- try testing.expectEqual(Reactor.Event{
- .data = 1,
- .is_error = false,
- .is_hup = true,
- .is_readable = false,
- .is_writable = true,
- }, event);
- }
- }, null);
-
- client.connect(binded_address) catch |err| switch (err) {
- error.WouldBlock => {},
- else => return err,
- };
-
- try reactor.poll(1, struct {
- fn call(event: Reactor.Event) !void {
- try testing.expectEqual(Reactor.Event{
- .data = 1,
- .is_error = false,
- .is_hup = false,
- .is_readable = false,
- .is_writable = true,
- }, event);
- }
- }, null);
-
- try reactor.poll(1, struct {
- fn call(event: Reactor.Event) !void {
- try testing.expectEqual(Reactor.Event{
- .data = 0,
- .is_error = false,
- .is_hup = false,
- .is_readable = true,
- .is_writable = false,
- }, event);
- }
- }, null);
-
- try reactor.remove(client.socket.fd);
- try reactor.remove(listener.socket.fd);
-}
diff --git a/lib/std/x/os/net.zig b/lib/std/x/os/net.zig
deleted file mode 100644
index e00299e243..0000000000
--- a/lib/std/x/os/net.zig
+++ /dev/null
@@ -1,605 +0,0 @@
-const std = @import("../../std.zig");
-const builtin = @import("builtin");
-
-const os = std.os;
-const fmt = std.fmt;
-const mem = std.mem;
-const math = std.math;
-const testing = std.testing;
-const native_os = builtin.os;
-const have_ifnamesize = @hasDecl(os.system, "IFNAMESIZE");
-
-pub const ResolveScopeIdError = error{
- NameTooLong,
- PermissionDenied,
- AddressFamilyNotSupported,
- ProtocolFamilyNotAvailable,
- ProcessFdQuotaExceeded,
- SystemFdQuotaExceeded,
- SystemResources,
- ProtocolNotSupported,
- SocketTypeNotSupported,
- InterfaceNotFound,
- FileSystem,
- Unexpected,
-};
-
-/// Resolves a network interface name into a scope/zone ID. It returns
-/// an error if either resolution fails, or if the interface name is
-/// too long.
-pub fn resolveScopeId(name: []const u8) ResolveScopeIdError!u32 {
- if (have_ifnamesize) {
- if (name.len >= os.IFNAMESIZE) return error.NameTooLong;
-
- if (native_os.tag == .windows or comptime native_os.tag.isDarwin()) {
- var interface_name: [os.IFNAMESIZE:0]u8 = undefined;
- mem.copy(u8, &interface_name, name);
- interface_name[name.len] = 0;
-
- const rc = blk: {
- if (native_os.tag == .windows) {
- break :blk os.windows.ws2_32.if_nametoindex(@ptrCast([*:0]const u8, &interface_name));
- } else {
- const index = os.system.if_nametoindex(@ptrCast([*:0]const u8, &interface_name));
- break :blk @bitCast(u32, index);
- }
- };
- if (rc == 0) {
- return error.InterfaceNotFound;
- }
- return rc;
- }
-
- if (native_os.tag == .linux) {
- const fd = try os.socket(os.AF.INET, os.SOCK.DGRAM, 0);
- defer os.closeSocket(fd);
-
- var f: os.ifreq = undefined;
- mem.copy(u8, &f.ifrn.name, name);
- f.ifrn.name[name.len] = 0;
-
- try os.ioctl_SIOCGIFINDEX(fd, &f);
-
- return @bitCast(u32, f.ifru.ivalue);
- }
- }
-
- return error.InterfaceNotFound;
-}
-
-/// An IPv4 address comprised of 4 bytes.
-pub const IPv4 = extern struct {
- /// A IPv4 host-port pair.
- pub const Address = extern struct {
- host: IPv4,
- port: u16,
- };
-
- /// Octets of a IPv4 address designating the local host.
- pub const localhost_octets = [_]u8{ 127, 0, 0, 1 };
-
- /// The IPv4 address of the local host.
- pub const localhost: IPv4 = .{ .octets = localhost_octets };
-
- /// Octets of an unspecified IPv4 address.
- pub const unspecified_octets = [_]u8{0} ** 4;
-
- /// An unspecified IPv4 address.
- pub const unspecified: IPv4 = .{ .octets = unspecified_octets };
-
- /// Octets of a broadcast IPv4 address.
- pub const broadcast_octets = [_]u8{255} ** 4;
-
- /// An IPv4 broadcast address.
- pub const broadcast: IPv4 = .{ .octets = broadcast_octets };
-
- /// The prefix octet pattern of a link-local IPv4 address.
- pub const link_local_prefix = [_]u8{ 169, 254 };
-
- /// The prefix octet patterns of IPv4 addresses intended for
- /// documentation.
- pub const documentation_prefixes = [_][]const u8{
- &[_]u8{ 192, 0, 2 },
- &[_]u8{ 198, 51, 100 },
- &[_]u8{ 203, 0, 113 },
- };
-
- octets: [4]u8,
-
- /// Returns whether or not the two addresses are equal to, less than, or
- /// greater than each other.
- pub fn cmp(self: IPv4, other: IPv4) math.Order {
- return mem.order(u8, &self.octets, &other.octets);
- }
-
- /// Returns true if both addresses are semantically equivalent.
- pub fn eql(self: IPv4, other: IPv4) bool {
- return mem.eql(u8, &self.octets, &other.octets);
- }
-
- /// Returns true if the address is a loopback address.
- pub fn isLoopback(self: IPv4) bool {
- return self.octets[0] == 127;
- }
-
- /// Returns true if the address is an unspecified IPv4 address.
- pub fn isUnspecified(self: IPv4) bool {
- return mem.eql(u8, &self.octets, &unspecified_octets);
- }
-
- /// Returns true if the address is a private IPv4 address.
- pub fn isPrivate(self: IPv4) bool {
- return self.octets[0] == 10 or
- (self.octets[0] == 172 and self.octets[1] >= 16 and self.octets[1] <= 31) or
- (self.octets[0] == 192 and self.octets[1] == 168);
- }
-
- /// Returns true if the address is a link-local IPv4 address.
- pub fn isLinkLocal(self: IPv4) bool {
- return mem.startsWith(u8, &self.octets, &link_local_prefix);
- }
-
- /// Returns true if the address is a multicast IPv4 address.
- pub fn isMulticast(self: IPv4) bool {
- return self.octets[0] >= 224 and self.octets[0] <= 239;
- }
-
- /// Returns true if the address is a IPv4 broadcast address.
- pub fn isBroadcast(self: IPv4) bool {
- return mem.eql(u8, &self.octets, &broadcast_octets);
- }
-
- /// Returns true if the address is in a range designated for documentation. Refer
- /// to IETF RFC 5737 for more details.
- pub fn isDocumentation(self: IPv4) bool {
- inline for (documentation_prefixes) |prefix| {
- if (mem.startsWith(u8, &self.octets, prefix)) {
- return true;
- }
- }
- return false;
- }
-
- /// Implements the `std.fmt.format` API.
- pub fn format(
- self: IPv4,
- comptime layout: []const u8,
- opts: fmt.FormatOptions,
- writer: anytype,
- ) !void {
- _ = opts;
- if (layout.len != 0) std.fmt.invalidFmtError(layout, self);
-
- try fmt.format(writer, "{}.{}.{}.{}", .{
- self.octets[0],
- self.octets[1],
- self.octets[2],
- self.octets[3],
- });
- }
-
- /// Set of possible errors that may encountered when parsing an IPv4
- /// address.
- pub const ParseError = error{
- UnexpectedEndOfOctet,
- TooManyOctets,
- OctetOverflow,
- UnexpectedToken,
- IncompleteAddress,
- };
-
- /// Parses an arbitrary IPv4 address.
- pub fn parse(buf: []const u8) ParseError!IPv4 {
- var octets: [4]u8 = undefined;
- var octet: u8 = 0;
-
- var index: u8 = 0;
- var saw_any_digits: bool = false;
-
- for (buf) |c| {
- switch (c) {
- '.' => {
- if (!saw_any_digits) return error.UnexpectedEndOfOctet;
- if (index == 3) return error.TooManyOctets;
- octets[index] = octet;
- index += 1;
- octet = 0;
- saw_any_digits = false;
- },
- '0'...'9' => {
- saw_any_digits = true;
- octet = math.mul(u8, octet, 10) catch return error.OctetOverflow;
- octet = math.add(u8, octet, c - '0') catch return error.OctetOverflow;
- },
- else => return error.UnexpectedToken,
- }
- }
-
- if (index == 3 and saw_any_digits) {
- octets[index] = octet;
- return IPv4{ .octets = octets };
- }
-
- return error.IncompleteAddress;
- }
-
- /// Maps the address to its IPv6 equivalent. In most cases, you would
- /// want to map the address to its IPv6 equivalent rather than directly
- /// re-interpreting the address.
- pub fn mapToIPv6(self: IPv4) IPv6 {
- var octets: [16]u8 = undefined;
- mem.copy(u8, octets[0..12], &IPv6.v4_mapped_prefix);
- mem.copy(u8, octets[12..], &self.octets);
- return IPv6{ .octets = octets, .scope_id = IPv6.no_scope_id };
- }
-
- /// Directly re-interprets the address to its IPv6 equivalent. In most
- /// cases, you would want to map the address to its IPv6 equivalent rather
- /// than directly re-interpreting the address.
- pub fn toIPv6(self: IPv4) IPv6 {
- var octets: [16]u8 = undefined;
- mem.set(u8, octets[0..12], 0);
- mem.copy(u8, octets[12..], &self.octets);
- return IPv6{ .octets = octets, .scope_id = IPv6.no_scope_id };
- }
-};
-
-/// An IPv6 address comprised of 16 bytes for an address, and 4 bytes
-/// for a scope ID; cumulatively summing to 20 bytes in total.
-pub const IPv6 = extern struct {
- /// A IPv6 host-port pair.
- pub const Address = extern struct {
- host: IPv6,
- port: u16,
- };
-
- /// Octets of a IPv6 address designating the local host.
- pub const localhost_octets = [_]u8{0} ** 15 ++ [_]u8{0x01};
-
- /// The IPv6 address of the local host.
- pub const localhost: IPv6 = .{
- .octets = localhost_octets,
- .scope_id = no_scope_id,
- };
-
- /// Octets of an unspecified IPv6 address.
- pub const unspecified_octets = [_]u8{0} ** 16;
-
- /// An unspecified IPv6 address.
- pub const unspecified: IPv6 = .{
- .octets = unspecified_octets,
- .scope_id = no_scope_id,
- };
-
- /// The prefix of a IPv6 address that is mapped to a IPv4 address.
- pub const v4_mapped_prefix = [_]u8{0} ** 10 ++ [_]u8{0xFF} ** 2;
-
- /// A marker value used to designate an IPv6 address with no
- /// associated scope ID.
- pub const no_scope_id = math.maxInt(u32);
-
- octets: [16]u8,
- scope_id: u32,
-
- /// Returns whether or not the two addresses are equal to, less than, or
- /// greater than each other.
- pub fn cmp(self: IPv6, other: IPv6) math.Order {
- return switch (mem.order(u8, self.octets, other.octets)) {
- .eq => math.order(self.scope_id, other.scope_id),
- else => |order| order,
- };
- }
-
- /// Returns true if both addresses are semantically equivalent.
- pub fn eql(self: IPv6, other: IPv6) bool {
- return self.scope_id == other.scope_id and mem.eql(u8, &self.octets, &other.octets);
- }
-
- /// Returns true if the address is an unspecified IPv6 address.
- pub fn isUnspecified(self: IPv6) bool {
- return mem.eql(u8, &self.octets, &unspecified_octets);
- }
-
- /// Returns true if the address is a loopback address.
- pub fn isLoopback(self: IPv6) bool {
- return mem.eql(u8, self.octets[0..3], &[_]u8{ 0, 0, 0 }) and
- mem.eql(u8, self.octets[12..], &[_]u8{ 0, 0, 0, 1 });
- }
-
- /// Returns true if the address maps to an IPv4 address.
- pub fn mapsToIPv4(self: IPv6) bool {
- return mem.startsWith(u8, &self.octets, &v4_mapped_prefix);
- }
-
- /// Returns an IPv4 address representative of the address should
- /// it the address be mapped to an IPv4 address. It returns null
- /// otherwise.
- pub fn toIPv4(self: IPv6) ?IPv4 {
- if (!self.mapsToIPv4()) return null;
- return IPv4{ .octets = self.octets[12..][0..4].* };
- }
-
- /// Returns true if the address is a multicast IPv6 address.
- pub fn isMulticast(self: IPv6) bool {
- return self.octets[0] == 0xFF;
- }
-
- /// Returns true if the address is a unicast link local IPv6 address.
- pub fn isLinkLocal(self: IPv6) bool {
- return self.octets[0] == 0xFE and self.octets[1] & 0xC0 == 0x80;
- }
-
- /// Returns true if the address is a deprecated unicast site local
- /// IPv6 address. Refer to IETF RFC 3879 for more details as to
- /// why they are deprecated.
- pub fn isSiteLocal(self: IPv6) bool {
- return self.octets[0] == 0xFE and self.octets[1] & 0xC0 == 0xC0;
- }
-
- /// IPv6 multicast address scopes.
- pub const Scope = enum(u8) {
- interface = 1,
- link = 2,
- realm = 3,
- admin = 4,
- site = 5,
- organization = 8,
- global = 14,
- unknown = 0xFF,
- };
-
- /// Returns the multicast scope of the address.
- pub fn scope(self: IPv6) Scope {
- if (!self.isMulticast()) return .unknown;
-
- return switch (self.octets[0] & 0x0F) {
- 1 => .interface,
- 2 => .link,
- 3 => .realm,
- 4 => .admin,
- 5 => .site,
- 8 => .organization,
- 14 => .global,
- else => .unknown,
- };
- }
-
- /// Implements the `std.fmt.format` API. Specifying 'x' or 's' formats the
- /// address lower-cased octets, while specifying 'X' or 'S' formats the
- /// address using upper-cased ASCII octets.
- ///
- /// The default specifier is 'x'.
- pub fn format(
- self: IPv6,
- comptime layout: []const u8,
- opts: fmt.FormatOptions,
- writer: anytype,
- ) !void {
- _ = opts;
- const specifier = comptime &[_]u8{if (layout.len == 0) 'x' else switch (layout[0]) {
- 'x', 'X' => |specifier| specifier,
- 's' => 'x',
- 'S' => 'X',
- else => std.fmt.invalidFmtError(layout, self),
- }};
-
- if (mem.startsWith(u8, &self.octets, &v4_mapped_prefix)) {
- return fmt.format(writer, "::{" ++ specifier ++ "}{" ++ specifier ++ "}:{}.{}.{}.{}", .{
- 0xFF,
- 0xFF,
- self.octets[12],
- self.octets[13],
- self.octets[14],
- self.octets[15],
- });
- }
-
- const zero_span: struct { from: usize, to: usize } = span: {
- var i: usize = 0;
- while (i < self.octets.len) : (i += 2) {
- if (self.octets[i] == 0 and self.octets[i + 1] == 0) break;
- } else break :span .{ .from = 0, .to = 0 };
-
- const from = i;
-
- while (i < self.octets.len) : (i += 2) {
- if (self.octets[i] != 0 or self.octets[i + 1] != 0) break;
- }
-
- break :span .{ .from = from, .to = i };
- };
-
- var i: usize = 0;
- while (i != 16) : (i += 2) {
- if (zero_span.from != zero_span.to and i == zero_span.from) {
- try writer.writeAll("::");
- } else if (i >= zero_span.from and i < zero_span.to) {} else {
- if (i != 0 and i != zero_span.to) try writer.writeAll(":");
-
- const val = @as(u16, self.octets[i]) << 8 | self.octets[i + 1];
- try fmt.formatIntValue(val, specifier, .{}, writer);
- }
- }
-
- if (self.scope_id != no_scope_id and self.scope_id != 0) {
- try fmt.format(writer, "%{d}", .{self.scope_id});
- }
- }
-
- /// Set of possible errors that may encountered when parsing an IPv6
- /// address.
- pub const ParseError = error{
- MalformedV4Mapping,
- InterfaceNotFound,
- UnknownScopeId,
- } || IPv4.ParseError;
-
- /// Parses an arbitrary IPv6 address, including link-local addresses.
- pub fn parse(buf: []const u8) ParseError!IPv6 {
- if (mem.lastIndexOfScalar(u8, buf, '%')) |index| {
- const ip_slice = buf[0..index];
- const scope_id_slice = buf[index + 1 ..];
-
- if (scope_id_slice.len == 0) return error.UnknownScopeId;
-
- const scope_id: u32 = switch (scope_id_slice[0]) {
- '0'...'9' => fmt.parseInt(u32, scope_id_slice, 10),
- else => resolveScopeId(scope_id_slice) catch |err| switch (err) {
- error.InterfaceNotFound => return error.InterfaceNotFound,
- else => err,
- },
- } catch return error.UnknownScopeId;
-
- return parseWithScopeID(ip_slice, scope_id);
- }
-
- return parseWithScopeID(buf, no_scope_id);
- }
-
- /// Parses an IPv6 address with a pre-specified scope ID. Presumes
- /// that the address is not a link-local address.
- pub fn parseWithScopeID(buf: []const u8, scope_id: u32) ParseError!IPv6 {
- var octets: [16]u8 = undefined;
- var octet: u16 = 0;
- var tail: [16]u8 = undefined;
-
- var out: []u8 = &octets;
- var index: u8 = 0;
-
- var saw_any_digits: bool = false;
- var abbrv: bool = false;
-
- for (buf) |c, i| {
- switch (c) {
- ':' => {
- if (!saw_any_digits) {
- if (abbrv) return error.UnexpectedToken;
- if (i != 0) abbrv = true;
- mem.set(u8, out[index..], 0);
- out = &tail;
- index = 0;
- continue;
- }
- if (index == 14) return error.TooManyOctets;
-
- out[index] = @truncate(u8, octet >> 8);
- index += 1;
- out[index] = @truncate(u8, octet);
- index += 1;
-
- octet = 0;
- saw_any_digits = false;
- },
- '.' => {
- if (!abbrv or out[0] != 0xFF and out[1] != 0xFF) {
- return error.MalformedV4Mapping;
- }
- const start_index = mem.lastIndexOfScalar(u8, buf[0..i], ':').? + 1;
- const v4 = try IPv4.parse(buf[start_index..]);
- octets[10] = 0xFF;
- octets[11] = 0xFF;
- mem.copy(u8, octets[12..], &v4.octets);
-
- return IPv6{ .octets = octets, .scope_id = scope_id };
- },
- else => {
- saw_any_digits = true;
- const digit = fmt.charToDigit(c, 16) catch return error.UnexpectedToken;
- octet = math.mul(u16, octet, 16) catch return error.OctetOverflow;
- octet = math.add(u16, octet, digit) catch return error.OctetOverflow;
- },
- }
- }
-
- if (!saw_any_digits and !abbrv) {
- return error.IncompleteAddress;
- }
-
- if (index == 14) {
- out[14] = @truncate(u8, octet >> 8);
- out[15] = @truncate(u8, octet);
- } else {
- out[index] = @truncate(u8, octet >> 8);
- index += 1;
- out[index] = @truncate(u8, octet);
- index += 1;
- mem.copy(u8, octets[16 - index ..], out[0..index]);
- }
-
- return IPv6{ .octets = octets, .scope_id = scope_id };
- }
-};
-
-test {
- testing.refAllDecls(@This());
-}
-
-test "ip: convert to and from ipv6" {
- try testing.expectFmt("::7f00:1", "{}", .{IPv4.localhost.toIPv6()});
- try testing.expect(!IPv4.localhost.toIPv6().mapsToIPv4());
-
- try testing.expectFmt("::ffff:127.0.0.1", "{}", .{IPv4.localhost.mapToIPv6()});
- try testing.expect(IPv4.localhost.mapToIPv6().mapsToIPv4());
-
- try testing.expect(IPv4.localhost.toIPv6().toIPv4() == null);
- try testing.expectFmt("127.0.0.1", "{?}", .{IPv4.localhost.mapToIPv6().toIPv4()});
-}
-
-test "ipv4: parse & format" {
- const cases = [_][]const u8{
- "0.0.0.0",
- "255.255.255.255",
- "1.2.3.4",
- "123.255.0.91",
- "127.0.0.1",
- };
-
- for (cases) |case| {
- try testing.expectFmt(case, "{}", .{try IPv4.parse(case)});
- }
-}
-
-test "ipv6: parse & format" {
- const inputs = [_][]const u8{
- "FF01:0:0:0:0:0:0:FB",
- "FF01::Fb",
- "::1",
- "::",
- "2001:db8::",
- "::1234:5678",
- "2001:db8::1234:5678",
- "::ffff:123.5.123.5",
- };
-
- const outputs = [_][]const u8{
- "ff01::fb",
- "ff01::fb",
- "::1",
- "::",
- "2001:db8::",
- "::1234:5678",
- "2001:db8::1234:5678",
- "::ffff:123.5.123.5",
- };
-
- for (inputs) |input, i| {
- try testing.expectFmt(outputs[i], "{}", .{try IPv6.parse(input)});
- }
-}
-
-test "ipv6: parse & format addresses with scope ids" {
- if (!have_ifnamesize) return error.SkipZigTest;
- const iface = if (native_os.tag == .linux)
- "lo"
- else
- "lo0";
- const input = "FF01::FB%" ++ iface;
- const output = "ff01::fb%1";
-
- const parsed = IPv6.parse(input) catch |err| switch (err) {
- error.InterfaceNotFound => return,
- else => return err,
- };
-
- try testing.expectFmt(output, "{}", .{parsed});
-}
diff --git a/lib/std/x/os/socket.zig b/lib/std/x/os/socket.zig
deleted file mode 100644
index 3de9359cc5..0000000000
--- a/lib/std/x/os/socket.zig
+++ /dev/null
@@ -1,320 +0,0 @@
-const std = @import("../../std.zig");
-const builtin = @import("builtin");
-const net = @import("net.zig");
-
-const os = std.os;
-const fmt = std.fmt;
-const mem = std.mem;
-const time = std.time;
-const meta = std.meta;
-const native_os = builtin.os;
-const native_endian = builtin.cpu.arch.endian();
-
-const Buffer = std.x.os.Buffer;
-
-const assert = std.debug.assert;
-
-/// A generic, cross-platform socket abstraction.
-pub const Socket = struct {
- /// A socket-address pair.
- pub const Connection = struct {
- socket: Socket,
- address: Socket.Address,
-
- /// Enclose a socket and address into a socket-address pair.
- pub fn from(socket: Socket, address: Socket.Address) Socket.Connection {
- return .{ .socket = socket, .address = address };
- }
- };
-
- /// A generic socket address abstraction. It is safe to directly access and modify
- /// the fields of a `Socket.Address`.
- pub const Address = union(enum) {
- pub const Native = struct {
- pub const requires_prepended_length = native_os.getVersionRange() == .semver;
- pub const Length = if (requires_prepended_length) u8 else [0]u8;
-
- pub const Family = if (requires_prepended_length) u8 else c_ushort;
-
- /// POSIX `sockaddr.storage`. The expected size and alignment is specified in IETF RFC 2553.
- pub const Storage = extern struct {
- pub const expected_size = os.sockaddr.SS_MAXSIZE;
- pub const expected_alignment = 8;
-
- pub const padding_size = expected_size -
- mem.alignForward(@sizeOf(Address.Native.Length), expected_alignment) -
- mem.alignForward(@sizeOf(Address.Native.Family), expected_alignment);
-
- len: Address.Native.Length align(expected_alignment) = undefined,
- family: Address.Native.Family align(expected_alignment) = undefined,
- padding: [padding_size]u8 align(expected_alignment) = undefined,
-
- comptime {
- assert(@sizeOf(Storage) == Storage.expected_size);
- assert(@alignOf(Storage) == Storage.expected_alignment);
- }
- };
- };
-
- ipv4: net.IPv4.Address,
- ipv6: net.IPv6.Address,
-
- /// Instantiate a new address with a IPv4 host and port.
- pub fn initIPv4(host: net.IPv4, port: u16) Socket.Address {
- return .{ .ipv4 = .{ .host = host, .port = port } };
- }
-
- /// Instantiate a new address with a IPv6 host and port.
- pub fn initIPv6(host: net.IPv6, port: u16) Socket.Address {
- return .{ .ipv6 = .{ .host = host, .port = port } };
- }
-
- /// Parses a `sockaddr` into a generic socket address.
- pub fn fromNative(address: *align(4) const os.sockaddr) Socket.Address {
- switch (address.family) {
- os.AF.INET => {
- const info = @ptrCast(*const os.sockaddr.in, address);
- const host = net.IPv4{ .octets = @bitCast([4]u8, info.addr) };
- const port = mem.bigToNative(u16, info.port);
- return Socket.Address.initIPv4(host, port);
- },
- os.AF.INET6 => {
- const info = @ptrCast(*const os.sockaddr.in6, address);
- const host = net.IPv6{ .octets = info.addr, .scope_id = info.scope_id };
- const port = mem.bigToNative(u16, info.port);
- return Socket.Address.initIPv6(host, port);
- },
- else => unreachable,
- }
- }
-
- /// Encodes a generic socket address into an extern union that may be reliably
- /// casted into a `sockaddr` which may be passed into socket syscalls.
- pub fn toNative(self: Socket.Address) extern union {
- ipv4: os.sockaddr.in,
- ipv6: os.sockaddr.in6,
- } {
- return switch (self) {
- .ipv4 => |address| .{
- .ipv4 = .{
- .addr = @bitCast(u32, address.host.octets),
- .port = mem.nativeToBig(u16, address.port),
- },
- },
- .ipv6 => |address| .{
- .ipv6 = .{
- .addr = address.host.octets,
- .port = mem.nativeToBig(u16, address.port),
- .scope_id = address.host.scope_id,
- .flowinfo = 0,
- },
- },
- };
- }
-
- /// Returns the number of bytes that make up the `sockaddr` equivalent to the address.
- pub fn getNativeSize(self: Socket.Address) u32 {
- return switch (self) {
- .ipv4 => @sizeOf(os.sockaddr.in),
- .ipv6 => @sizeOf(os.sockaddr.in6),
- };
- }
-
- /// Implements the `std.fmt.format` API.
- pub fn format(
- self: Socket.Address,
- comptime layout: []const u8,
- opts: fmt.FormatOptions,
- writer: anytype,
- ) !void {
- if (layout.len != 0) std.fmt.invalidFmtError(layout, self);
- _ = opts;
- switch (self) {
- .ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
- .ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
- }
- }
- };
-
- /// POSIX `msghdr`. Denotes a destination address, set of buffers, control data, and flags. Ported
- /// directly from musl.
- pub const Message = if (native_os.isAtLeast(.windows, .vista) != null and native_os.isAtLeast(.windows, .vista).?)
- extern struct {
- name: usize = @ptrToInt(@as(?[*]u8, null)),
- name_len: c_int = 0,
-
- buffers: usize = undefined,
- buffers_len: c_ulong = undefined,
-
- control: Buffer = .{
- .ptr = @ptrToInt(@as(?[*]u8, null)),
- .len = 0,
- },
- flags: c_ulong = 0,
-
- pub usingnamespace MessageMixin(Message);
- }
- else if (native_os.tag == .windows)
- extern struct {
- name: usize = @ptrToInt(@as(?[*]u8, null)),
- name_len: c_int = 0,
-
- buffers: usize = undefined,
- buffers_len: u32 = undefined,
-
- control: Buffer = .{
- .ptr = @ptrToInt(@as(?[*]u8, null)),
- .len = 0,
- },
- flags: u32 = 0,
-
- pub usingnamespace MessageMixin(Message);
- }
- else if (@sizeOf(usize) > 4 and native_endian == .Big)
- extern struct {
- name: usize = @ptrToInt(@as(?[*]u8, null)),
- name_len: c_uint = 0,
-
- buffers: usize = undefined,
- _pad_1: c_int = 0,
- buffers_len: c_int = undefined,
-
- control: usize = @ptrToInt(@as(?[*]u8, null)),
- _pad_2: c_int = 0,
- control_len: c_uint = 0,
-
- flags: c_int = 0,
-
- pub usingnamespace MessageMixin(Message);
- }
- else if (@sizeOf(usize) > 4 and native_endian == .Little)
- extern struct {
- name: usize = @ptrToInt(@as(?[*]u8, null)),
- name_len: c_uint = 0,
-
- buffers: usize = undefined,
- buffers_len: c_int = undefined,
- _pad_1: c_int = 0,
-
- control: usize = @ptrToInt(@as(?[*]u8, null)),
- control_len: c_uint = 0,
- _pad_2: c_int = 0,
-
- flags: c_int = 0,
-
- pub usingnamespace MessageMixin(Message);
- }
- else
- extern struct {
- name: usize = @ptrToInt(@as(?[*]u8, null)),
- name_len: c_uint = 0,
-
- buffers: usize = undefined,
- buffers_len: c_int = undefined,
-
- control: usize = @ptrToInt(@as(?[*]u8, null)),
- control_len: c_uint = 0,
-
- flags: c_int = 0,
-
- pub usingnamespace MessageMixin(Message);
- };
-
- fn MessageMixin(comptime Self: type) type {
- return struct {
- pub fn fromBuffers(buffers: []const Buffer) Self {
- var self: Self = .{};
- self.setBuffers(buffers);
- return self;
- }
-
- pub fn setName(self: *Self, name: []const u8) void {
- self.name = @ptrToInt(name.ptr);
- self.name_len = @intCast(meta.fieldInfo(Self, .name_len).field_type, name.len);
- }
-
- pub fn setBuffers(self: *Self, buffers: []const Buffer) void {
- self.buffers = @ptrToInt(buffers.ptr);
- self.buffers_len = @intCast(meta.fieldInfo(Self, .buffers_len).field_type, buffers.len);
- }
-
- pub fn setControl(self: *Self, control: []const u8) void {
- if (native_os.tag == .windows) {
- self.control = Buffer.from(control);
- } else {
- self.control = @ptrToInt(control.ptr);
- self.control_len = @intCast(meta.fieldInfo(Self, .control_len).field_type, control.len);
- }
- }
-
- pub fn setFlags(self: *Self, flags: u32) void {
- self.flags = @intCast(meta.fieldInfo(Self, .flags).field_type, flags);
- }
-
- pub fn getName(self: Self) []const u8 {
- return @intToPtr([*]const u8, self.name)[0..@intCast(usize, self.name_len)];
- }
-
- pub fn getBuffers(self: Self) []const Buffer {
- return @intToPtr([*]const Buffer, self.buffers)[0..@intCast(usize, self.buffers_len)];
- }
-
- pub fn getControl(self: Self) []const u8 {
- if (native_os.tag == .windows) {
- return self.control.into();
- } else {
- return @intToPtr([*]const u8, self.control)[0..@intCast(usize, self.control_len)];
- }
- }
-
- pub fn getFlags(self: Self) u32 {
- return @intCast(u32, self.flags);
- }
- };
- }
-
- /// POSIX `linger`, denoting the linger settings of a socket.
- ///
- /// Microsoft's documentation and glibc denote the fields to be unsigned
- /// short's on Windows, whereas glibc and musl denote the fields to be
- /// int's on every other platform.
- pub const Linger = extern struct {
- pub const Field = switch (native_os.tag) {
- .windows => c_ushort,
- else => c_int,
- };
-
- enabled: Field,
- timeout_seconds: Field,
-
- pub fn init(timeout_seconds: ?u16) Socket.Linger {
- return .{
- .enabled = @intCast(Socket.Linger.Field, @boolToInt(timeout_seconds != null)),
- .timeout_seconds = if (timeout_seconds) |seconds| @intCast(Socket.Linger.Field, seconds) else 0,
- };
- }
- };
-
- /// Possible set of flags to initialize a socket with.
- pub const InitFlags = enum {
- // Initialize a socket to be non-blocking.
- nonblocking,
-
- // Have a socket close itself on exec syscalls.
- close_on_exec,
- };
-
- /// The underlying handle of a socket.
- fd: os.socket_t,
-
- /// Enclose a socket abstraction over an existing socket file descriptor.
- pub fn from(fd: os.socket_t) Socket {
- return Socket{ .fd = fd };
- }
-
- /// Mix in socket syscalls depending on the platform we are compiling against.
- pub usingnamespace switch (native_os.tag) {
- .windows => @import("socket_windows.zig"),
- else => @import("socket_posix.zig"),
- }.Mixin(Socket);
-};
diff --git a/lib/std/x/os/socket_posix.zig b/lib/std/x/os/socket_posix.zig
deleted file mode 100644
index 859075aa20..0000000000
--- a/lib/std/x/os/socket_posix.zig
+++ /dev/null
@@ -1,275 +0,0 @@
-const std = @import("../../std.zig");
-
-const os = std.os;
-const mem = std.mem;
-const time = std.time;
-
-pub fn Mixin(comptime Socket: type) type {
- return struct {
- /// Open a new socket.
- pub fn init(domain: u32, socket_type: u32, protocol: u32, flags: std.enums.EnumFieldStruct(Socket.InitFlags, bool, false)) !Socket {
- var raw_flags: u32 = socket_type;
- const set = std.EnumSet(Socket.InitFlags).init(flags);
- if (set.contains(.close_on_exec)) raw_flags |= os.SOCK.CLOEXEC;
- if (set.contains(.nonblocking)) raw_flags |= os.SOCK.NONBLOCK;
- return Socket{ .fd = try os.socket(domain, raw_flags, protocol) };
- }
-
- /// Closes the socket.
- pub fn deinit(self: Socket) void {
- os.closeSocket(self.fd);
- }
-
- /// Shutdown either the read side, write side, or all side of the socket.
- pub fn shutdown(self: Socket, how: os.ShutdownHow) !void {
- return os.shutdown(self.fd, how);
- }
-
- /// Binds the socket to an address.
- pub fn bind(self: Socket, address: Socket.Address) !void {
- return os.bind(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize());
- }
-
- /// Start listening for incoming connections on the socket.
- pub fn listen(self: Socket, max_backlog_size: u31) !void {
- return os.listen(self.fd, max_backlog_size);
- }
-
- /// Have the socket attempt to the connect to an address.
- pub fn connect(self: Socket, address: Socket.Address) !void {
- return os.connect(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize());
- }
-
- /// Accept a pending incoming connection queued to the kernel backlog
- /// of the socket.
- pub fn accept(self: Socket, flags: std.enums.EnumFieldStruct(Socket.InitFlags, bool, false)) !Socket.Connection {
- var address: Socket.Address.Native.Storage = undefined;
- var address_len: u32 = @sizeOf(Socket.Address.Native.Storage);
-
- var raw_flags: u32 = 0;
- const set = std.EnumSet(Socket.InitFlags).init(flags);
- if (set.contains(.close_on_exec)) raw_flags |= os.SOCK.CLOEXEC;
- if (set.contains(.nonblocking)) raw_flags |= os.SOCK.NONBLOCK;
-
- const socket = Socket{ .fd = try os.accept(self.fd, @ptrCast(*os.sockaddr, &address), &address_len, raw_flags) };
- const socket_address = Socket.Address.fromNative(@ptrCast(*os.sockaddr, &address));
-
- return Socket.Connection.from(socket, socket_address);
- }
-
- /// Read data from the socket into the buffer provided with a set of flags
- /// specified. It returns the number of bytes read into the buffer provided.
- pub fn read(self: Socket, buf: []u8, flags: u32) !usize {
- return os.recv(self.fd, buf, flags);
- }
-
- /// Write a buffer of data provided to the socket with a set of flags specified.
- /// It returns the number of bytes that are written to the socket.
- pub fn write(self: Socket, buf: []const u8, flags: u32) !usize {
- return os.send(self.fd, buf, flags);
- }
-
- /// Writes multiple I/O vectors with a prepended message header to the socket
- /// with a set of flags specified. It returns the number of bytes that are
- /// written to the socket.
- pub fn writeMessage(self: Socket, msg: Socket.Message, flags: u32) !usize {
- while (true) {
- const rc = os.system.sendmsg(self.fd, &msg, @intCast(c_int, flags));
- return switch (os.errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
- .ACCES => error.AccessDenied,
- .AGAIN => error.WouldBlock,
- .ALREADY => error.FastOpenAlreadyInProgress,
- .BADF => unreachable, // always a race condition
- .CONNRESET => error.ConnectionResetByPeer,
- .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set.
- .FAULT => unreachable, // An invalid user space address was specified for an argument.
- .INTR => continue,
- .INVAL => unreachable, // Invalid argument passed.
- .ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified
- .MSGSIZE => error.MessageTooBig,
- .NOBUFS => error.SystemResources,
- .NOMEM => error.SystemResources,
- .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
- .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type.
- .PIPE => error.BrokenPipe,
- .AFNOSUPPORT => error.AddressFamilyNotSupported,
- .LOOP => error.SymLinkLoop,
- .NAMETOOLONG => error.NameTooLong,
- .NOENT => error.FileNotFound,
- .NOTDIR => error.NotDir,
- .HOSTUNREACH => error.NetworkUnreachable,
- .NETUNREACH => error.NetworkUnreachable,
- .NOTCONN => error.SocketNotConnected,
- .NETDOWN => error.NetworkSubsystemFailed,
- else => |err| os.unexpectedErrno(err),
- };
- }
- }
-
- /// Read multiple I/O vectors with a prepended message header from the socket
- /// with a set of flags specified. It returns the number of bytes that were
- /// read into the buffer provided.
- pub fn readMessage(self: Socket, msg: *Socket.Message, flags: u32) !usize {
- while (true) {
- const rc = os.system.recvmsg(self.fd, msg, @intCast(c_int, flags));
- return switch (os.errno(rc)) {
- .SUCCESS => @intCast(usize, rc),
- .BADF => unreachable, // always a race condition
- .FAULT => unreachable,
- .INVAL => unreachable,
- .NOTCONN => unreachable,
- .NOTSOCK => unreachable,
- .INTR => continue,
- .AGAIN => error.WouldBlock,
- .NOMEM => error.SystemResources,
- .CONNREFUSED => error.ConnectionRefused,
- .CONNRESET => error.ConnectionResetByPeer,
- else => |err| os.unexpectedErrno(err),
- };
- }
- }
-
- /// Query the address that the socket is locally bounded to.
- pub fn getLocalAddress(self: Socket) !Socket.Address {
- var address: Socket.Address.Native.Storage = undefined;
- var address_len: u32 = @sizeOf(Socket.Address.Native.Storage);
- try os.getsockname(self.fd, @ptrCast(*os.sockaddr, &address), &address_len);
- return Socket.Address.fromNative(@ptrCast(*os.sockaddr, &address));
- }
-
- /// Query the address that the socket is connected to.
- pub fn getRemoteAddress(self: Socket) !Socket.Address {
- var address: Socket.Address.Native.Storage = undefined;
- var address_len: u32 = @sizeOf(Socket.Address.Native.Storage);
- try os.getpeername(self.fd, @ptrCast(*os.sockaddr, &address), &address_len);
- return Socket.Address.fromNative(@ptrCast(*os.sockaddr, &address));
- }
-
- /// Query and return the latest cached error on the socket.
- pub fn getError(self: Socket) !void {
- return os.getsockoptError(self.fd);
- }
-
- /// Query the read buffer size of the socket.
- pub fn getReadBufferSize(self: Socket) !u32 {
- var value: u32 = undefined;
- var value_len: u32 = @sizeOf(u32);
-
- const rc = os.system.getsockopt(self.fd, os.SOL.SOCKET, os.SO.RCVBUF, mem.asBytes(&value), &value_len);
- return switch (os.errno(rc)) {
- .SUCCESS => value,
- .BADF => error.BadFileDescriptor,
- .FAULT => error.InvalidAddressSpace,
- .INVAL => error.InvalidSocketOption,
- .NOPROTOOPT => error.UnknownSocketOption,
- .NOTSOCK => error.NotASocket,
- else => |err| os.unexpectedErrno(err),
- };
- }
-
- /// Query the write buffer size of the socket.
- pub fn getWriteBufferSize(self: Socket) !u32 {
- var value: u32 = undefined;
- var value_len: u32 = @sizeOf(u32);
-
- const rc = os.system.getsockopt(self.fd, os.SOL.SOCKET, os.SO.SNDBUF, mem.asBytes(&value), &value_len);
- return switch (os.errno(rc)) {
- .SUCCESS => value,
- .BADF => error.BadFileDescriptor,
- .FAULT => error.InvalidAddressSpace,
- .INVAL => error.InvalidSocketOption,
- .NOPROTOOPT => error.UnknownSocketOption,
- .NOTSOCK => error.NotASocket,
- else => |err| os.unexpectedErrno(err),
- };
- }
-
- /// Set a socket option.
- pub fn setOption(self: Socket, level: u32, code: u32, value: []const u8) !void {
- return os.setsockopt(self.fd, level, code, value);
- }
-
- /// Have close() or shutdown() syscalls block until all queued messages in the socket have been successfully
- /// sent, or if the timeout specified in seconds has been reached. It returns `error.UnsupportedSocketOption`
- /// if the host does not support the option for a socket to linger around up until a timeout specified in
- /// seconds.
- pub fn setLinger(self: Socket, timeout_seconds: ?u16) !void {
- if (@hasDecl(os.SO, "LINGER")) {
- const settings = Socket.Linger.init(timeout_seconds);
- return self.setOption(os.SOL.SOCKET, os.SO.LINGER, mem.asBytes(&settings));
- }
-
- return error.UnsupportedSocketOption;
- }
-
- /// On connection-oriented sockets, have keep-alive messages be sent periodically. The timing in which keep-alive
- /// messages are sent are dependant on operating system settings. It returns `error.UnsupportedSocketOption` if
- /// the host does not support periodically sending keep-alive messages on connection-oriented sockets.
- pub fn setKeepAlive(self: Socket, enabled: bool) !void {
- if (@hasDecl(os.SO, "KEEPALIVE")) {
- return self.setOption(os.SOL.SOCKET, os.SO.KEEPALIVE, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- }
- return error.UnsupportedSocketOption;
- }
-
- /// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
- /// the host does not support sockets listening the same address.
- pub fn setReuseAddress(self: Socket, enabled: bool) !void {
- if (@hasDecl(os.SO, "REUSEADDR")) {
- return self.setOption(os.SOL.SOCKET, os.SO.REUSEADDR, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- }
- return error.UnsupportedSocketOption;
- }
-
- /// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
- /// the host does not supports sockets listening on the same port.
- pub fn setReusePort(self: Socket, enabled: bool) !void {
- if (@hasDecl(os.SO, "REUSEPORT")) {
- return self.setOption(os.SOL.SOCKET, os.SO.REUSEPORT, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- }
- return error.UnsupportedSocketOption;
- }
-
- /// Set the write buffer size of the socket.
- pub fn setWriteBufferSize(self: Socket, size: u32) !void {
- return self.setOption(os.SOL.SOCKET, os.SO.SNDBUF, mem.asBytes(&size));
- }
-
- /// Set the read buffer size of the socket.
- pub fn setReadBufferSize(self: Socket, size: u32) !void {
- return self.setOption(os.SOL.SOCKET, os.SO.RCVBUF, mem.asBytes(&size));
- }
-
- /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
- /// set on a non-blocking socket.
- ///
- /// Set a timeout on the socket that is to occur if no messages are successfully written
- /// to its bound destination after a specified number of milliseconds. A subsequent write
- /// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
- pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void {
- const timeout = os.timeval{
- .tv_sec = @intCast(i32, milliseconds / time.ms_per_s),
- .tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms),
- };
-
- return self.setOption(os.SOL.SOCKET, os.SO.SNDTIMEO, mem.asBytes(&timeout));
- }
-
- /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
- /// set on a non-blocking socket.
- ///
- /// Set a timeout on the socket that is to occur if no messages are successfully read
- /// from its bound destination after a specified number of milliseconds. A subsequent
- /// read from the socket will thereafter return `error.WouldBlock` should the timeout be
- /// exceeded.
- pub fn setReadTimeout(self: Socket, milliseconds: usize) !void {
- const timeout = os.timeval{
- .tv_sec = @intCast(i32, milliseconds / time.ms_per_s),
- .tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms),
- };
-
- return self.setOption(os.SOL.SOCKET, os.SO.RCVTIMEO, mem.asBytes(&timeout));
- }
- };
-}
diff --git a/lib/std/x/os/socket_windows.zig b/lib/std/x/os/socket_windows.zig
deleted file mode 100644
index 43b047dd10..0000000000
--- a/lib/std/x/os/socket_windows.zig
+++ /dev/null
@@ -1,458 +0,0 @@
-const std = @import("../../std.zig");
-const net = @import("net.zig");
-
-const os = std.os;
-const mem = std.mem;
-
-const windows = std.os.windows;
-const ws2_32 = windows.ws2_32;
-
-pub fn Mixin(comptime Socket: type) type {
- return struct {
- /// Open a new socket.
- pub fn init(domain: u32, socket_type: u32, protocol: u32, flags: std.enums.EnumFieldStruct(Socket.InitFlags, bool, false)) !Socket {
- var raw_flags: u32 = ws2_32.WSA_FLAG_OVERLAPPED;
- const set = std.EnumSet(Socket.InitFlags).init(flags);
- if (set.contains(.close_on_exec)) raw_flags |= ws2_32.WSA_FLAG_NO_HANDLE_INHERIT;
-
- const fd = ws2_32.WSASocketW(
- @intCast(i32, domain),
- @intCast(i32, socket_type),
- @intCast(i32, protocol),
- null,
- 0,
- raw_flags,
- );
- if (fd == ws2_32.INVALID_SOCKET) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSANOTINITIALISED => {
- _ = try windows.WSAStartup(2, 2);
- return init(domain, socket_type, protocol, flags);
- },
- .WSAEAFNOSUPPORT => error.AddressFamilyNotSupported,
- .WSAEMFILE => error.ProcessFdQuotaExceeded,
- .WSAENOBUFS => error.SystemResources,
- .WSAEPROTONOSUPPORT => error.ProtocolNotSupported,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- if (set.contains(.nonblocking)) {
- var enabled: c_ulong = 1;
- const rc = ws2_32.ioctlsocket(fd, ws2_32.FIONBIO, &enabled);
- if (rc == ws2_32.SOCKET_ERROR) {
- return windows.unexpectedWSAError(ws2_32.WSAGetLastError());
- }
- }
-
- return Socket{ .fd = fd };
- }
-
- /// Closes the socket.
- pub fn deinit(self: Socket) void {
- _ = ws2_32.closesocket(self.fd);
- }
-
- /// Shutdown either the read side, write side, or all side of the socket.
- pub fn shutdown(self: Socket, how: os.ShutdownHow) !void {
- const rc = ws2_32.shutdown(self.fd, switch (how) {
- .recv => ws2_32.SD_RECEIVE,
- .send => ws2_32.SD_SEND,
- .both => ws2_32.SD_BOTH,
- });
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAECONNABORTED => return error.ConnectionAborted,
- .WSAECONNRESET => return error.ConnectionResetByPeer,
- .WSAEINPROGRESS => return error.BlockingOperationInProgress,
- .WSAEINVAL => unreachable,
- .WSAENETDOWN => return error.NetworkSubsystemFailed,
- .WSAENOTCONN => return error.SocketNotConnected,
- .WSAENOTSOCK => unreachable,
- .WSANOTINITIALISED => unreachable,
- else => |err| return windows.unexpectedWSAError(err),
- };
- }
- }
-
- /// Binds the socket to an address.
- pub fn bind(self: Socket, address: Socket.Address) !void {
- const rc = ws2_32.bind(self.fd, @ptrCast(*const ws2_32.sockaddr, &address.toNative()), @intCast(c_int, address.getNativeSize()));
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAEACCES => error.AccessDenied,
- .WSAEADDRINUSE => error.AddressInUse,
- .WSAEADDRNOTAVAIL => error.AddressNotAvailable,
- .WSAEFAULT => error.BadAddress,
- .WSAEINPROGRESS => error.WouldBlock,
- .WSAEINVAL => error.AlreadyBound,
- .WSAENOBUFS => error.NoEphemeralPortsAvailable,
- .WSAENOTSOCK => error.NotASocket,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
- }
-
- /// Start listening for incoming connections on the socket.
- pub fn listen(self: Socket, max_backlog_size: u31) !void {
- const rc = ws2_32.listen(self.fd, max_backlog_size);
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAEADDRINUSE => error.AddressInUse,
- .WSAEISCONN => error.AlreadyConnected,
- .WSAEINVAL => error.SocketNotBound,
- .WSAEMFILE, .WSAENOBUFS => error.SystemResources,
- .WSAENOTSOCK => error.FileDescriptorNotASocket,
- .WSAEOPNOTSUPP => error.OperationNotSupported,
- .WSAEINPROGRESS => error.WouldBlock,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
- }
-
- /// Have the socket attempt to the connect to an address.
- pub fn connect(self: Socket, address: Socket.Address) !void {
- const rc = ws2_32.connect(self.fd, @ptrCast(*const ws2_32.sockaddr, &address.toNative()), @intCast(c_int, address.getNativeSize()));
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAEADDRINUSE => error.AddressInUse,
- .WSAEADDRNOTAVAIL => error.AddressNotAvailable,
- .WSAECONNREFUSED => error.ConnectionRefused,
- .WSAETIMEDOUT => error.ConnectionTimedOut,
- .WSAEFAULT => error.BadAddress,
- .WSAEINVAL => error.ListeningSocket,
- .WSAEISCONN => error.AlreadyConnected,
- .WSAENOTSOCK => error.NotASocket,
- .WSAEACCES => error.BroadcastNotEnabled,
- .WSAENOBUFS => error.SystemResources,
- .WSAEAFNOSUPPORT => error.AddressFamilyNotSupported,
- .WSAEINPROGRESS, .WSAEWOULDBLOCK => error.WouldBlock,
- .WSAEHOSTUNREACH, .WSAENETUNREACH => error.NetworkUnreachable,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
- }
-
- /// Accept a pending incoming connection queued to the kernel backlog
- /// of the socket.
- pub fn accept(self: Socket, flags: std.enums.EnumFieldStruct(Socket.InitFlags, bool, false)) !Socket.Connection {
- var address: Socket.Address.Native.Storage = undefined;
- var address_len: c_int = @sizeOf(Socket.Address.Native.Storage);
-
- const fd = ws2_32.accept(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len);
- if (fd == ws2_32.INVALID_SOCKET) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSANOTINITIALISED => unreachable,
- .WSAECONNRESET => error.ConnectionResetByPeer,
- .WSAEFAULT => unreachable,
- .WSAEINVAL => error.SocketNotListening,
- .WSAEMFILE => error.ProcessFdQuotaExceeded,
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAENOBUFS => error.FileDescriptorNotASocket,
- .WSAEOPNOTSUPP => error.OperationNotSupported,
- .WSAEWOULDBLOCK => error.WouldBlock,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- const socket = Socket.from(fd);
- errdefer socket.deinit();
-
- const socket_address = Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address));
-
- const set = std.EnumSet(Socket.InitFlags).init(flags);
- if (set.contains(.nonblocking)) {
- var enabled: c_ulong = 1;
- const rc = ws2_32.ioctlsocket(fd, ws2_32.FIONBIO, &enabled);
- if (rc == ws2_32.SOCKET_ERROR) {
- return windows.unexpectedWSAError(ws2_32.WSAGetLastError());
- }
- }
-
- return Socket.Connection.from(socket, socket_address);
- }
-
- /// Read data from the socket into the buffer provided with a set of flags
- /// specified. It returns the number of bytes read into the buffer provided.
- pub fn read(self: Socket, buf: []u8, flags: u32) !usize {
- var bufs = &[_]ws2_32.WSABUF{.{ .len = @intCast(u32, buf.len), .buf = buf.ptr }};
- var num_bytes: u32 = undefined;
- var flags_ = flags;
-
- const rc = ws2_32.WSARecv(self.fd, bufs, 1, &num_bytes, &flags_, null, null);
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAECONNABORTED => error.ConnectionAborted,
- .WSAECONNRESET => error.ConnectionResetByPeer,
- .WSAEDISCON => error.ConnectionClosedByPeer,
- .WSAEFAULT => error.BadBuffer,
- .WSAEINPROGRESS,
- .WSAEWOULDBLOCK,
- .WSA_IO_PENDING,
- .WSAETIMEDOUT,
- => error.WouldBlock,
- .WSAEINTR => error.Cancelled,
- .WSAEINVAL => error.SocketNotBound,
- .WSAEMSGSIZE => error.MessageTooLarge,
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAENETRESET => error.NetworkReset,
- .WSAENOTCONN => error.SocketNotConnected,
- .WSAENOTSOCK => error.FileDescriptorNotASocket,
- .WSAEOPNOTSUPP => error.OperationNotSupported,
- .WSAESHUTDOWN => error.AlreadyShutdown,
- .WSA_OPERATION_ABORTED => error.OperationAborted,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- return @intCast(usize, num_bytes);
- }
-
- /// Write a buffer of data provided to the socket with a set of flags specified.
- /// It returns the number of bytes that are written to the socket.
- pub fn write(self: Socket, buf: []const u8, flags: u32) !usize {
- var bufs = &[_]ws2_32.WSABUF{.{ .len = @intCast(u32, buf.len), .buf = @intToPtr([*]u8, @ptrToInt(buf.ptr)) }};
- var num_bytes: u32 = undefined;
-
- const rc = ws2_32.WSASend(self.fd, bufs, 1, &num_bytes, flags, null, null);
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAECONNABORTED => error.ConnectionAborted,
- .WSAECONNRESET => error.ConnectionResetByPeer,
- .WSAEFAULT => error.BadBuffer,
- .WSAEINPROGRESS,
- .WSAEWOULDBLOCK,
- .WSA_IO_PENDING,
- .WSAETIMEDOUT,
- => error.WouldBlock,
- .WSAEINTR => error.Cancelled,
- .WSAEINVAL => error.SocketNotBound,
- .WSAEMSGSIZE => error.MessageTooLarge,
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAENETRESET => error.NetworkReset,
- .WSAENOBUFS => error.BufferDeadlock,
- .WSAENOTCONN => error.SocketNotConnected,
- .WSAENOTSOCK => error.FileDescriptorNotASocket,
- .WSAEOPNOTSUPP => error.OperationNotSupported,
- .WSAESHUTDOWN => error.AlreadyShutdown,
- .WSA_OPERATION_ABORTED => error.OperationAborted,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- return @intCast(usize, num_bytes);
- }
-
- /// Writes multiple I/O vectors with a prepended message header to the socket
- /// with a set of flags specified. It returns the number of bytes that are
- /// written to the socket.
- pub fn writeMessage(self: Socket, msg: Socket.Message, flags: u32) !usize {
- const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSASENDMSG, self.fd, ws2_32.WSAID_WSASENDMSG);
-
- var num_bytes: u32 = undefined;
-
- const rc = call(self.fd, &msg, flags, &num_bytes, null, null);
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAECONNABORTED => error.ConnectionAborted,
- .WSAECONNRESET => error.ConnectionResetByPeer,
- .WSAEFAULT => error.BadBuffer,
- .WSAEINPROGRESS,
- .WSAEWOULDBLOCK,
- .WSA_IO_PENDING,
- .WSAETIMEDOUT,
- => error.WouldBlock,
- .WSAEINTR => error.Cancelled,
- .WSAEINVAL => error.SocketNotBound,
- .WSAEMSGSIZE => error.MessageTooLarge,
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAENETRESET => error.NetworkReset,
- .WSAENOBUFS => error.BufferDeadlock,
- .WSAENOTCONN => error.SocketNotConnected,
- .WSAENOTSOCK => error.FileDescriptorNotASocket,
- .WSAEOPNOTSUPP => error.OperationNotSupported,
- .WSAESHUTDOWN => error.AlreadyShutdown,
- .WSA_OPERATION_ABORTED => error.OperationAborted,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- return @intCast(usize, num_bytes);
- }
-
- /// Read multiple I/O vectors with a prepended message header from the socket
- /// with a set of flags specified. It returns the number of bytes that were
- /// read into the buffer provided.
- pub fn readMessage(self: Socket, msg: *Socket.Message, flags: u32) !usize {
- _ = flags;
- const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSARECVMSG, self.fd, ws2_32.WSAID_WSARECVMSG);
-
- var num_bytes: u32 = undefined;
-
- const rc = call(self.fd, msg, &num_bytes, null, null);
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSAECONNABORTED => error.ConnectionAborted,
- .WSAECONNRESET => error.ConnectionResetByPeer,
- .WSAEDISCON => error.ConnectionClosedByPeer,
- .WSAEFAULT => error.BadBuffer,
- .WSAEINPROGRESS,
- .WSAEWOULDBLOCK,
- .WSA_IO_PENDING,
- .WSAETIMEDOUT,
- => error.WouldBlock,
- .WSAEINTR => error.Cancelled,
- .WSAEINVAL => error.SocketNotBound,
- .WSAEMSGSIZE => error.MessageTooLarge,
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAENETRESET => error.NetworkReset,
- .WSAENOTCONN => error.SocketNotConnected,
- .WSAENOTSOCK => error.FileDescriptorNotASocket,
- .WSAEOPNOTSUPP => error.OperationNotSupported,
- .WSAESHUTDOWN => error.AlreadyShutdown,
- .WSA_OPERATION_ABORTED => error.OperationAborted,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- return @intCast(usize, num_bytes);
- }
-
- /// Query the address that the socket is locally bounded to.
- pub fn getLocalAddress(self: Socket) !Socket.Address {
- var address: Socket.Address.Native.Storage = undefined;
- var address_len: c_int = @sizeOf(Socket.Address.Native.Storage);
-
- const rc = ws2_32.getsockname(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len);
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSANOTINITIALISED => unreachable,
- .WSAEFAULT => unreachable,
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAENOTSOCK => error.FileDescriptorNotASocket,
- .WSAEINVAL => error.SocketNotBound,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- return Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address));
- }
-
- /// Query the address that the socket is connected to.
- pub fn getRemoteAddress(self: Socket) !Socket.Address {
- var address: Socket.Address.Native.Storage = undefined;
- var address_len: c_int = @sizeOf(Socket.Address.Native.Storage);
-
- const rc = ws2_32.getpeername(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len);
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSANOTINITIALISED => unreachable,
- .WSAEFAULT => unreachable,
- .WSAENETDOWN => error.NetworkSubsystemFailed,
- .WSAENOTSOCK => error.FileDescriptorNotASocket,
- .WSAEINVAL => error.SocketNotBound,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
-
- return Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address));
- }
-
- /// Query and return the latest cached error on the socket.
- pub fn getError(self: Socket) !void {
- _ = self;
- return {};
- }
-
- /// Query the read buffer size of the socket.
- pub fn getReadBufferSize(self: Socket) !u32 {
- _ = self;
- return 0;
- }
-
- /// Query the write buffer size of the socket.
- pub fn getWriteBufferSize(self: Socket) !u32 {
- _ = self;
- return 0;
- }
-
- /// Set a socket option.
- pub fn setOption(self: Socket, level: u32, code: u32, value: []const u8) !void {
- const rc = ws2_32.setsockopt(self.fd, @intCast(i32, level), @intCast(i32, code), value.ptr, @intCast(i32, value.len));
- if (rc == ws2_32.SOCKET_ERROR) {
- return switch (ws2_32.WSAGetLastError()) {
- .WSANOTINITIALISED => unreachable,
- .WSAENETDOWN => return error.NetworkSubsystemFailed,
- .WSAEFAULT => unreachable,
- .WSAENOTSOCK => return error.FileDescriptorNotASocket,
- .WSAEINVAL => return error.SocketNotBound,
- else => |err| windows.unexpectedWSAError(err),
- };
- }
- }
-
- /// Have close() or shutdown() syscalls block until all queued messages in the socket have been successfully
- /// sent, or if the timeout specified in seconds has been reached. It returns `error.UnsupportedSocketOption`
- /// if the host does not support the option for a socket to linger around up until a timeout specified in
- /// seconds.
- pub fn setLinger(self: Socket, timeout_seconds: ?u16) !void {
- const settings = Socket.Linger.init(timeout_seconds);
- return self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.LINGER, mem.asBytes(&settings));
- }
-
- /// On connection-oriented sockets, have keep-alive messages be sent periodically. The timing in which keep-alive
- /// messages are sent are dependant on operating system settings. It returns `error.UnsupportedSocketOption` if
- /// the host does not support periodically sending keep-alive messages on connection-oriented sockets.
- pub fn setKeepAlive(self: Socket, enabled: bool) !void {
- return self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.KEEPALIVE, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- }
-
- /// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
- /// the host does not support sockets listening the same address.
- pub fn setReuseAddress(self: Socket, enabled: bool) !void {
- return self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.REUSEADDR, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- }
-
- /// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
- /// the host does not supports sockets listening on the same port.
- ///
- /// TODO: verify if this truly mimicks SO.REUSEPORT behavior, or if SO.REUSE_UNICASTPORT provides the correct behavior
- pub fn setReusePort(self: Socket, enabled: bool) !void {
- try self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.BROADCAST, mem.asBytes(&@as(u32, @boolToInt(enabled))));
- try self.setReuseAddress(enabled);
- }
-
- /// Set the write buffer size of the socket.
- pub fn setWriteBufferSize(self: Socket, size: u32) !void {
- return self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.SNDBUF, mem.asBytes(&size));
- }
-
- /// Set the read buffer size of the socket.
- pub fn setReadBufferSize(self: Socket, size: u32) !void {
- return self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.RCVBUF, mem.asBytes(&size));
- }
-
- /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
- /// set on a non-blocking socket.
- ///
- /// Set a timeout on the socket that is to occur if no messages are successfully written
- /// to its bound destination after a specified number of milliseconds. A subsequent write
- /// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
- pub fn setWriteTimeout(self: Socket, milliseconds: u32) !void {
- return self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.SNDTIMEO, mem.asBytes(&milliseconds));
- }
-
- /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
- /// set on a non-blocking socket.
- ///
- /// Set a timeout on the socket that is to occur if no messages are successfully read
- /// from its bound destination after a specified number of milliseconds. A subsequent
- /// read from the socket will thereafter return `error.WouldBlock` should the timeout be
- /// exceeded.
- pub fn setReadTimeout(self: Socket, milliseconds: u32) !void {
- return self.setOption(ws2_32.SOL.SOCKET, ws2_32.SO.RCVTIMEO, mem.asBytes(&milliseconds));
- }
- };
-}
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 159b4dbb3a..6986424b5f 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -125,7 +125,7 @@ pub fn extraData(tree: Ast, index: usize, comptime T: type) T {
const fields = std.meta.fields(T);
var result: T = undefined;
inline for (fields) |field, i| {
- comptime assert(field.field_type == Node.Index);
+ comptime assert(field.type == Node.Index);
@field(result, field.name) = tree.extra_data[index + i];
}
return result;
@@ -559,8 +559,7 @@ pub fn firstToken(tree: Ast, node: Node.Index) TokenIndex {
.container_field,
=> {
const name_token = main_tokens[n];
- if (token_tags[name_token + 1] != .colon) return name_token - end_offset;
- if (name_token > 0 and token_tags[name_token - 1] == .keyword_comptime) {
+ if (token_tags[name_token] != .keyword_comptime and name_token > 0 and token_tags[name_token - 1] == .keyword_comptime) {
end_offset += 1;
}
return name_token - end_offset;
@@ -618,7 +617,7 @@ pub fn firstToken(tree: Ast, node: Node.Index) TokenIndex {
.tagged_union_enum_tag_trailing,
=> {
const main_token = main_tokens[n];
- switch (token_tags[main_token - 1]) {
+ switch (token_tags[main_token -| 1]) {
.keyword_packed, .keyword_extern => end_offset += 1,
else => {},
}
@@ -634,8 +633,8 @@ pub fn firstToken(tree: Ast, node: Node.Index) TokenIndex {
return switch (token_tags[main_token]) {
.asterisk,
.asterisk_asterisk,
- => switch (token_tags[main_token - 1]) {
- .l_bracket => main_token - 1,
+ => switch (token_tags[main_token -| 1]) {
+ .l_bracket => main_token -| 1,
else => main_token,
},
.l_bracket => main_token,
@@ -2015,7 +2014,7 @@ fn fullPtrType(tree: Ast, info: full.PtrType.Components) full.PtrType {
.asterisk_asterisk,
=> switch (token_tags[info.main_token + 1]) {
.r_bracket, .colon => .Many,
- .identifier => if (token_tags[info.main_token - 1] == .l_bracket) Size.C else .One,
+ .identifier => if (token_tags[info.main_token -| 1] == .l_bracket) Size.C else .One,
else => .One,
},
.l_bracket => Size.Slice,
@@ -2060,6 +2059,9 @@ fn fullContainerDecl(tree: Ast, info: full.ContainerDecl.Components) full.Contai
.ast = info,
.layout_token = null,
};
+
+ if (info.main_token == 0) return result;
+
switch (token_tags[info.main_token - 1]) {
.keyword_extern, .keyword_packed => result.layout_token = info.main_token - 1,
else => {},
@@ -2189,9 +2191,9 @@ fn fullCall(tree: Ast, info: full.Call.Components) full.Call {
.ast = info,
.async_token = null,
};
- const maybe_async_token = tree.firstToken(info.fn_expr) - 1;
- if (token_tags[maybe_async_token] == .keyword_async) {
- result.async_token = maybe_async_token;
+ const first_token = tree.firstToken(info.fn_expr);
+ if (first_token != 0 and token_tags[first_token - 1] == .keyword_async) {
+ result.async_token = first_token - 1;
}
return result;
}
diff --git a/lib/std/zig/c_builtins.zig b/lib/std/zig/c_builtins.zig
index 08a2ec78c4..9c28e56e31 100644
--- a/lib/std/zig/c_builtins.zig
+++ b/lib/std/zig/c_builtins.zig
@@ -246,7 +246,9 @@ pub inline fn __builtin_constant_p(expr: anytype) c_int {
return @boolToInt(false);
}
pub fn __builtin_mul_overflow(a: anytype, b: anytype, result: *@TypeOf(a, b)) c_int {
- return @boolToInt(@mulWithOverflow(@TypeOf(a, b), a, b, result));
+ const res = @mulWithOverflow(a, b);
+ result.* = res[0];
+ return res[1];
}
// __builtin_alloca_with_align is not currently implemented.
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index aa9d59dae9..a050e592a2 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -50,7 +50,7 @@ pub fn cast(comptime DestType: type, target: anytype) DestType {
},
.Union => |info| {
inline for (info.fields) |field| {
- if (field.field_type == SourceType) return @unionInit(DestType, field.name, target);
+ if (field.type == SourceType) return @unionInit(DestType, field.name, target);
}
@compileError("cast to union type '" ++ @typeName(DestType) ++ "' from type '" ++ @typeName(SourceType) ++ "' which is not present in union");
},
diff --git a/lib/std/zig/number_literal.zig b/lib/std/zig/number_literal.zig
index 118d16f59e..1b41908371 100644
--- a/lib/std/zig/number_literal.zig
+++ b/lib/std/zig/number_literal.zig
@@ -151,12 +151,14 @@ pub fn parseNumberLiteral(bytes: []const u8) Result {
special = 0;
if (float) continue;
- if (x != 0) if (@mulWithOverflow(u64, x, base, &x)) {
- overflow = true;
- };
- if (@addWithOverflow(u64, x, digit, &x)) {
- overflow = true;
+ if (x != 0) {
+ const res = @mulWithOverflow(x, base);
+ if (res[1] != 0) overflow = true;
+ x = res[0];
}
+ const res = @addWithOverflow(x, digit);
+ if (res[1] != 0) overflow = true;
+ x = res[0];
}
if (underscore) return .{ .failure = .{ .trailing_underscore = bytes.len - 1 } };
if (special != 0) return .{ .failure = .{ .trailing_special = bytes.len - 1 } };
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 0155cf0d75..3bb27975db 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -50,22 +50,7 @@ pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
const estimated_node_count = (tokens.len + 2) / 2;
try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
- // Root node must be index 0.
- // Root <- skip ContainerMembers eof
- parser.nodes.appendAssumeCapacity(.{
- .tag = .root,
- .main_token = 0,
- .data = undefined,
- });
- const root_members = try parser.parseContainerMembers();
- const root_decls = try root_members.toSpan(&parser);
- if (parser.token_tags[parser.tok_i] != .eof) {
- try parser.warnExpected(.eof);
- }
- parser.nodes.items(.data)[0] = .{
- .lhs = root_decls.start,
- .rhs = root_decls.end,
- };
+ try parser.parseRoot();
// TODO experiment with compacting the MultiArrayList slices here
return Ast{
@@ -153,7 +138,7 @@ const Parser = struct {
try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
const result = @intCast(u32, p.extra_data.items.len);
inline for (fields) |field| {
- comptime assert(field.field_type == Node.Index);
+ comptime assert(field.type == Node.Index);
p.extra_data.appendAssumeCapacity(@field(extra, field.name));
}
return result;
@@ -237,13 +222,34 @@ const Parser = struct {
return error.ParseError;
}
+ /// Root <- skip container_doc_comment? ContainerMembers eof
+ fn parseRoot(p: *Parser) !void {
+ // Root node must be index 0.
+ p.nodes.appendAssumeCapacity(.{
+ .tag = .root,
+ .main_token = 0,
+ .data = undefined,
+ });
+ const root_members = try p.parseContainerMembers();
+ const root_decls = try root_members.toSpan(p);
+ if (p.token_tags[p.tok_i] != .eof) {
+ try p.warnExpected(.eof);
+ }
+ p.nodes.items(.data)[0] = .{
+ .lhs = root_decls.start,
+ .rhs = root_decls.end,
+ };
+ }
+
/// ContainerMembers <- ContainerDeclarations (ContainerField COMMA)* (ContainerField / ContainerDeclarations)
+ ///
/// ContainerDeclarations
/// <- TestDecl ContainerDeclarations
- /// / TopLevelComptime ContainerDeclarations
- /// / KEYWORD_pub? TopLevelDecl ContainerDeclarations
+ /// / ComptimeDecl ContainerDeclarations
+ /// / doc_comment? KEYWORD_pub? Decl ContainerDeclarations
/// /
- /// TopLevelComptime <- KEYWORD_comptime Block
+ ///
+ /// ComptimeDecl <- KEYWORD_comptime Block
fn parseContainerMembers(p: *Parser) !Members {
const scratch_top = p.scratch.items.len;
defer p.scratch.shrinkRetainingCapacity(scratch_top);
@@ -313,7 +319,6 @@ const Parser = struct {
trailing = false;
},
else => {
- p.tok_i += 1;
const identifier = p.tok_i;
defer last_field = identifier;
const container_field = p.expectContainerField() catch |err| switch (err) {
@@ -622,7 +627,7 @@ const Parser = struct {
};
}
- /// TopLevelDecl
+ /// Decl
/// <- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block)
/// / (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? VarDecl
/// / KEYWORD_usingnamespace Expr SEMICOLON
@@ -888,7 +893,9 @@ const Parser = struct {
}
}
- /// ContainerField <- KEYWORD_comptime? IDENTIFIER (COLON TypeExpr ByteAlign?)? (EQUAL Expr)?
+ /// ContainerField
+ /// <- doc_comment? KEYWORD_comptime? IDENTIFIER (COLON TypeExpr)? ByteAlign? (EQUAL Expr)?
+ /// / doc_comment? KEYWORD_comptime? (IDENTIFIER COLON)? !KEYWORD_fn TypeExpr ByteAlign? (EQUAL Expr)?
fn expectContainerField(p: *Parser) !Node.Index {
var main_token = p.tok_i;
_ = p.eatToken(.keyword_comptime);
@@ -1174,6 +1181,7 @@ const Parser = struct {
}
/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+ ///
/// ForStatement
/// <- ForPrefix BlockExpr ( KEYWORD_else Statement )?
/// / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement )
@@ -1235,6 +1243,7 @@ const Parser = struct {
}
/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+ ///
/// WhileStatement
/// <- WhilePrefix BlockExpr ( KEYWORD_else Payload? Statement )?
/// / WhilePrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
@@ -1369,13 +1378,18 @@ const Parser = struct {
}
/// AssignExpr <- Expr (AssignOp Expr)?
+ ///
/// AssignOp
/// <- ASTERISKEQUAL
+ /// / ASTERISKPIPEEQUAL
/// / SLASHEQUAL
/// / PERCENTEQUAL
/// / PLUSEQUAL
+ /// / PLUSPIPEEQUAL
/// / MINUSEQUAL
+ /// / MINUSPIPEEQUAL
/// / LARROW2EQUAL
+ /// / LARROW2PIPEEQUAL
/// / RARROW2EQUAL
/// / AMPERSANDEQUAL
/// / CARETEQUAL
@@ -1554,6 +1568,7 @@ const Parser = struct {
}
/// PrefixExpr <- PrefixOp* PrimaryExpr
+ ///
/// PrefixOp
/// <- EXCLAMATIONMARK
/// / MINUS
@@ -1592,17 +1607,21 @@ const Parser = struct {
}
/// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
+ ///
/// PrefixTypeOp
/// <- QUESTIONMARK
/// / KEYWORD_anyframe MINUSRARROW
/// / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
- /// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+ /// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON Expr COLON Expr)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / ArrayTypeStart
+ ///
/// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
+ ///
/// PtrTypeStart
/// <- ASTERISK
/// / ASTERISK2
/// / LBRACKET ASTERISK (LETTERC / COLON Expr)? RBRACKET
+ ///
/// ArrayTypeStart <- LBRACKET Expr (COLON Expr)? RBRACKET
fn parseTypeExpr(p: *Parser) Error!Node.Index {
switch (p.token_tags[p.tok_i]) {
@@ -2069,6 +2088,7 @@ const Parser = struct {
}
/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+ ///
/// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
fn parseForExpr(p: *Parser) !Node.Index {
const for_token = p.eatToken(.keyword_for) orelse return null_node;
@@ -2104,6 +2124,7 @@ const Parser = struct {
}
/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+ ///
/// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
fn parseWhileExpr(p: *Parser) !Node.Index {
const while_token = p.eatToken(.keyword_while) orelse return null_node;
@@ -2155,6 +2176,7 @@ const Parser = struct {
}
/// CurlySuffixExpr <- TypeExpr InitList?
+ ///
/// InitList
/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
@@ -2273,7 +2295,9 @@ const Parser = struct {
/// SuffixExpr
/// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
/// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
+ ///
/// FnCallArguments <- LPAREN ExprList RPAREN
+ ///
/// ExprList <- (Expr COMMA)* Expr?
fn parseSuffixExpr(p: *Parser) !Node.Index {
if (p.eatToken(.keyword_async)) |_| {
@@ -2411,18 +2435,26 @@ const Parser = struct {
/// / KEYWORD_unreachable
/// / STRINGLITERAL
/// / SwitchExpr
+ ///
/// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)? ContainerDeclAuto
- /// ContainerDeclAuto <- ContainerDeclType LBRACE ContainerMembers RBRACE
+ ///
+ /// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
+ ///
/// InitList
/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
/// / LBRACE RBRACE
+ ///
/// ErrorSetDecl <- KEYWORD_error LBRACE IdentifierList RBRACE
+ ///
/// GroupedExpr <- LPAREN Expr RPAREN
+ ///
/// IfTypeExpr <- IfPrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
+ ///
/// LabeledTypeExpr
/// <- BlockLabel Block
/// / BlockLabel? LoopTypeExpr
+ ///
/// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr)
fn parsePrimaryTypeExpr(p: *Parser) !Node.Index {
switch (p.token_tags[p.tok_i]) {
@@ -2752,6 +2784,7 @@ const Parser = struct {
}
/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+ ///
/// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
fn parseForTypeExpr(p: *Parser) !Node.Index {
const for_token = p.eatToken(.keyword_for) orelse return null_node;
@@ -2787,6 +2820,7 @@ const Parser = struct {
}
/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+ ///
/// WhileTypeExpr <- WhilePrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
fn parseWhileTypeExpr(p: *Parser) !Node.Index {
const while_token = p.eatToken(.keyword_while) orelse return null_node;
@@ -2862,11 +2896,17 @@ const Parser = struct {
}
/// AsmExpr <- KEYWORD_asm KEYWORD_volatile? LPAREN Expr AsmOutput? RPAREN
+ ///
/// AsmOutput <- COLON AsmOutputList AsmInput?
+ ///
/// AsmInput <- COLON AsmInputList AsmClobbers?
+ ///
/// AsmClobbers <- COLON StringList
+ ///
/// StringList <- (STRINGLITERAL COMMA)* STRINGLITERAL?
+ ///
/// AsmOutputList <- (AsmOutputItem COMMA)* AsmOutputItem?
+ ///
/// AsmInputList <- (AsmInputItem COMMA)* AsmInputItem?
fn expectAsmExpr(p: *Parser) !Node.Index {
const asm_token = p.assertToken(.keyword_asm);
@@ -3070,15 +3110,17 @@ const Parser = struct {
return expr_node;
}
- /// ParamDecl
- /// <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
- /// / DOT3
- /// ParamType
- /// <- Keyword_anytype
- /// / TypeExpr
/// This function can return null nodes and then still return nodes afterwards,
/// such as in the case of anytype and `...`. Caller must look for rparen to find
/// out when there are no more param decls left.
+ ///
+ /// ParamDecl
+ /// <- doc_comment? (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
+ /// / DOT3
+ ///
+ /// ParamType
+ /// <- KEYWORD_anytype
+ /// / TypeExpr
fn expectParamDecl(p: *Parser) !Node.Index {
_ = try p.eatDocComments();
switch (p.token_tags[p.tok_i]) {
@@ -3120,8 +3162,9 @@ const Parser = struct {
return identifier;
}
- /// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
/// Returns the first identifier token, if any.
+ ///
+ /// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
fn parsePtrIndexPayload(p: *Parser) !TokenIndex {
_ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
_ = p.eatToken(.asterisk);
@@ -3134,6 +3177,7 @@ const Parser = struct {
}
/// SwitchProng <- KEYWORD_inline? SwitchCase EQUALRARROW PtrIndexPayload? AssignExpr
+ ///
/// SwitchCase
/// <- SwitchItem (COMMA SwitchItem)* COMMA?
/// / KEYWORD_else
@@ -3386,6 +3430,7 @@ const Parser = struct {
}
/// Caller must have already verified the first token.
+ ///
/// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
///
/// ContainerDeclType
@@ -3557,6 +3602,7 @@ const Parser = struct {
}
/// Holds temporary data until we are ready to construct the full ContainerDecl AST node.
+ ///
/// ByteAlign <- KEYWORD_align LPAREN Expr RPAREN
fn parseByteAlign(p: *Parser) !Node.Index {
_ = p.eatToken(.keyword_align) orelse return null_node;
@@ -3626,6 +3672,7 @@ const Parser = struct {
}
/// FnCallArguments <- LPAREN ExprList RPAREN
+ ///
/// ExprList <- (Expr COMMA)* Expr?
fn parseBuiltinCall(p: *Parser) !Node.Index {
const builtin_token = p.assertToken(.builtin);
@@ -3699,7 +3746,7 @@ const Parser = struct {
}
}
- /// KEYWORD_if LPAREN Expr RPAREN PtrPayload? Body (KEYWORD_else Payload? Body)?
+ /// IfPrefix <- KEYWORD_if LPAREN Expr RPAREN PtrPayload?
fn parseIf(p: *Parser, comptime bodyParseFn: fn (p: *Parser) Error!Node.Index) !Node.Index {
const if_token = p.eatToken(.keyword_if) orelse return null_node;
_ = try p.expectToken(.l_paren);
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index c8c6199bf4..1b8a240642 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1,7 +1,9 @@
test "zig fmt: tuple struct" {
try testCanonical(
\\const T = struct {
- \\ comptime u32,
+ \\ /// doc comment on tuple field
+ \\ comptime comptime u32,
+ \\ /// another doc comment on tuple field
\\ *u32 = 1,
\\ // needs to be wrapped in parentheses to not be parsed as a function decl
\\ (fn () void) align(1),
@@ -214,6 +216,34 @@ test "zig fmt: top-level fields" {
);
}
+test "zig fmt: top-level tuple function call type" {
+ try testCanonical(
+ \\foo()
+ \\
+ );
+}
+
+test "zig fmt: top-level enum missing 'const name ='" {
+ try testError(
+ \\enum(u32)
+ \\
+ , &[_]Error{.expected_token});
+}
+
+test "zig fmt: top-level bare asterisk+identifier" {
+ try testCanonical(
+ \\*x
+ \\
+ );
+}
+
+test "zig fmt: top-level bare asterisk+asterisk+identifier" {
+ try testCanonical(
+ \\**x
+ \\
+ );
+}
+
test "zig fmt: C style containers" {
try testError(
\\struct Foo {
@@ -4210,6 +4240,18 @@ test "zig fmt: remove newlines surrounding doc comment within container decl" {
);
}
+test "zig fmt: comptime before comptime field" {
+ try testError(
+ \\const Foo = struct {
+ \\ a: i32,
+ \\ comptime comptime b: i32 = 1234,
+ \\};
+ \\
+ , &[_]Error{
+ .expected_comma_after_field,
+ });
+}
+
test "zig fmt: invalid else branch statement" {
try testError(
\\/// This is a doc comment for a comptime block.
@@ -5500,6 +5542,35 @@ test "zig fmt: canonicalize symbols (keywords)" {
);
}
+test "zig fmt: no space before newline before multiline string" {
+ try testCanonical(
+ \\const S = struct {
+ \\ text: []const u8,
+ \\ comment: []const u8,
+ \\};
+ \\
+ \\test {
+ \\ const s1 = .{
+ \\ .text =
+ \\ \\hello
+ \\ \\world
+ \\ ,
+ \\ .comment = "test",
+ \\ };
+ \\ _ = s1;
+ \\ const s2 = .{
+ \\ .comment = "test",
+ \\ .text =
+ \\ \\hello
+ \\ \\world
+ \\ ,
+ \\ };
+ \\ _ = s2;
+ \\}
+ \\
+ );
+}
+
// Normalize \xNN and \u{NN} escapes and unicode inside @"" escapes.
test "zig fmt: canonicalize symbols (character escapes)" {
try testTransform(
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index ab741e789c..3c02a3bbaf 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -1675,7 +1675,12 @@ fn renderStructInit(
try renderToken(ais, tree, struct_init.ast.lbrace + 1, .none); // .
try renderIdentifier(ais, tree, struct_init.ast.lbrace + 2, .space, .eagerly_unquote); // name
- try renderToken(ais, tree, struct_init.ast.lbrace + 3, .space); // =
+ // Don't output a space after the = if expression is a multiline string,
+ // since then it will start on the next line.
+ const nodes = tree.nodes.items(.tag);
+ const expr = nodes[struct_init.ast.fields[0]];
+ var space_after_equal: Space = if (expr == .multiline_string_literal) .none else .space;
+ try renderToken(ais, tree, struct_init.ast.lbrace + 3, space_after_equal); // =
try renderExpression(gpa, ais, tree, struct_init.ast.fields[0], .comma);
for (struct_init.ast.fields[1..]) |field_init| {
@@ -1683,7 +1688,8 @@ fn renderStructInit(
try renderExtraNewlineToken(ais, tree, init_token - 3);
try renderToken(ais, tree, init_token - 3, .none); // .
try renderIdentifier(ais, tree, init_token - 2, .space, .eagerly_unquote); // name
- try renderToken(ais, tree, init_token - 1, .space); // =
+ space_after_equal = if (nodes[field_init] == .multiline_string_literal) .none else .space;
+ try renderToken(ais, tree, init_token - 1, space_after_equal); // =
try renderExpression(gpa, ais, tree, field_init, .comma);
}
diff --git a/lib/std/zig/system/x86.zig b/lib/std/zig/system/x86.zig
index 66468ba6ff..873659e58c 100644
--- a/lib/std/zig/system/x86.zig
+++ b/lib/std/zig/system/x86.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const builtin = @import("builtin");
const Target = std.Target;
const CrossTarget = std.zig.CrossTarget;
@@ -527,25 +528,43 @@ const CpuidLeaf = packed struct {
edx: u32,
};
+/// This is a workaround for the C backend until zig has the ability to put
+/// C code in inline assembly.
+extern fn zig_x86_cpuid(leaf_id: u32, subid: u32, eax: *u32, ebx: *u32, ecx: *u32, edx: *u32) callconv(.C) void;
+
fn cpuid(leaf_id: u32, subid: u32) CpuidLeaf {
// valid for both x86 and x86_64
var eax: u32 = undefined;
var ebx: u32 = undefined;
var ecx: u32 = undefined;
var edx: u32 = undefined;
- asm volatile ("cpuid"
- : [_] "={eax}" (eax),
- [_] "={ebx}" (ebx),
- [_] "={ecx}" (ecx),
- [_] "={edx}" (edx),
- : [_] "{eax}" (leaf_id),
- [_] "{ecx}" (subid),
- );
+
+ if (builtin.zig_backend == .stage2_c) {
+ zig_x86_cpuid(leaf_id, subid, &eax, &ebx, &ecx, &edx);
+ } else {
+ asm volatile ("cpuid"
+ : [_] "={eax}" (eax),
+ [_] "={ebx}" (ebx),
+ [_] "={ecx}" (ecx),
+ [_] "={edx}" (edx),
+ : [_] "{eax}" (leaf_id),
+ [_] "{ecx}" (subid),
+ );
+ }
+
return .{ .eax = eax, .ebx = ebx, .ecx = ecx, .edx = edx };
}
+/// This is a workaround for the C backend until zig has the ability to put
+/// C code in inline assembly.
+extern fn zig_x86_get_xcr0() callconv(.C) u32;
+
// Read control register 0 (XCR0). Used to detect features such as AVX.
fn getXCR0() u32 {
+ if (builtin.zig_backend == .stage2_c) {
+ return zig_x86_get_xcr0();
+ }
+
return asm volatile (
\\ xor %%ecx, %%ecx
\\ xgetbv
diff --git a/lib/zig.h b/lib/zig.h
index cea9a0532a..a45320fca6 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -6,6 +6,12 @@
#include <stddef.h>
#include <stdint.h>
+#if _MSC_VER
+#include <intrin.h>
+#elif defined(__i386__) || defined(__x86_64__)
+#include <cpuid.h>
+#endif
+
#if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L
#if __STDC_VERSION__ >= 199901L
#include <stdbool.h>
@@ -38,6 +44,12 @@ typedef char bool;
#define zig_threadlocal zig_threadlocal_unavailable
#endif
+#if _MSC_VER
+#define zig_const_arr
+#else
+#define zig_const_arr static const
+#endif
+
#if zig_has_attribute(naked) || defined(__GNUC__)
#define zig_naked __attribute__((naked))
#elif defined(_MSC_VER)
@@ -65,7 +77,7 @@ typedef char bool;
#elif zig_has_attribute(aligned)
#define zig_align(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER
-#define zig_align zig_align_unavailable
+#define zig_align(alignment) __declspec(align(alignment))
#else
#define zig_align zig_align_unavailable
#endif
@@ -73,12 +85,12 @@ typedef char bool;
#if zig_has_attribute(aligned)
#define zig_align_fn(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER
-#define zig_align_fn zig_align_fn_unavailable
+#define zig_align_fn(alignment)
#else
#define zig_align_fn zig_align_fn_unavailable
#endif
-#if zig_has_builtin(unreachable)
+#if zig_has_builtin(unreachable) || defined(__GNUC__)
#define zig_unreachable() __builtin_unreachable()
#else
#define zig_unreachable()
@@ -92,13 +104,16 @@ typedef char bool;
#if zig_has_attribute(alias)
#define zig_export(sig, symbol, name) zig_extern sig __attribute__((alias(symbol)))
+#elif _MSC_VER
+#define zig_export(sig, symbol, name) sig;\
+ __pragma(comment(linker, "/alternatename:" name "=" symbol ))
#else
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap)
+#elif zig_has_builtin(trap) || defined(__GNUC__)
#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
@@ -108,7 +123,7 @@ typedef char bool;
#define zig_breakpoint() raise(SIGTRAP)
#endif
-#if zig_has_builtin(return_address)
+#if zig_has_builtin(return_address) || defined(__GNUC__)
#define zig_return_address() __builtin_extract_return_addr(__builtin_return_address(0))
#elif defined(_MSC_VER)
#define zig_return_address() _ReturnAddress()
@@ -116,13 +131,13 @@ typedef char bool;
#define zig_return_address() 0
#endif
-#if zig_has_builtin(frame_address)
+#if zig_has_builtin(frame_address) || defined(__GNUC__)
#define zig_frame_address() __builtin_frame_address(0)
#else
#define zig_frame_address() 0
#endif
-#if zig_has_builtin(prefetch)
+#if zig_has_builtin(prefetch) || defined(__GNUC__)
#define zig_prefetch(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
#else
#define zig_prefetch(addr, rw, locality)
@@ -136,22 +151,25 @@ typedef char bool;
#define zig_wasm_memory_grow(index, delta) zig_unimplemented()
#endif
+#define zig_concat(lhs, rhs) lhs##rhs
+#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs)
+
#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
#include <stdatomic.h>
#define zig_atomic(type) _Atomic(type)
-#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail)
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail)
-#define zig_atomicrmw_xchg(obj, arg, order) atomic_exchange_explicit (obj, arg, order)
-#define zig_atomicrmw_add(obj, arg, order) atomic_fetch_add_explicit (obj, arg, order)
-#define zig_atomicrmw_sub(obj, arg, order) atomic_fetch_sub_explicit (obj, arg, order)
-#define zig_atomicrmw_or(obj, arg, order) atomic_fetch_or_explicit (obj, arg, order)
-#define zig_atomicrmw_xor(obj, arg, order) atomic_fetch_xor_explicit (obj, arg, order)
-#define zig_atomicrmw_and(obj, arg, order) atomic_fetch_and_explicit (obj, arg, order)
-#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand (obj, arg, order)
-#define zig_atomicrmw_min(obj, arg, order) __atomic_fetch_min (obj, arg, order)
-#define zig_atomicrmw_max(obj, arg, order) __atomic_fetch_max (obj, arg, order)
-#define zig_atomic_store(obj, arg, order) atomic_store_explicit (obj, arg, order)
-#define zig_atomic_load(obj, order) atomic_load_explicit (obj, order)
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail)
+#define zig_atomicrmw_xchg(obj, arg, order, type) atomic_exchange_explicit (obj, arg, order)
+#define zig_atomicrmw_add(obj, arg, order, type) atomic_fetch_add_explicit (obj, arg, order)
+#define zig_atomicrmw_sub(obj, arg, order, type) atomic_fetch_sub_explicit (obj, arg, order)
+#define zig_atomicrmw_or(obj, arg, order, type) atomic_fetch_or_explicit (obj, arg, order)
+#define zig_atomicrmw_xor(obj, arg, order, type) atomic_fetch_xor_explicit (obj, arg, order)
+#define zig_atomicrmw_and(obj, arg, order, type) atomic_fetch_and_explicit (obj, arg, order)
+#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand (obj, arg, order)
+#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order)
+#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order)
+#define zig_atomic_store(obj, arg, order, type) atomic_store_explicit (obj, arg, order)
+#define zig_atomic_load(obj, order, type) atomic_load_explicit (obj, order)
#define zig_fence(order) atomic_thread_fence(order)
#elif defined(__GNUC__)
#define memory_order_relaxed __ATOMIC_RELAXED
@@ -161,20 +179,43 @@ typedef char bool;
#define memory_order_acq_rel __ATOMIC_ACQ_REL
#define memory_order_seq_cst __ATOMIC_SEQ_CST
#define zig_atomic(type) type
-#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail)
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail)
-#define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order)
-#define zig_atomicrmw_add(obj, arg, order) __atomic_fetch_add (obj, arg, order)
-#define zig_atomicrmw_sub(obj, arg, order) __atomic_fetch_sub (obj, arg, order)
-#define zig_atomicrmw_or(obj, arg, order) __atomic_fetch_or (obj, arg, order)
-#define zig_atomicrmw_xor(obj, arg, order) __atomic_fetch_xor (obj, arg, order)
-#define zig_atomicrmw_and(obj, arg, order) __atomic_fetch_and (obj, arg, order)
-#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand(obj, arg, order)
-#define zig_atomicrmw_min(obj, arg, order) __atomic_fetch_min (obj, arg, order)
-#define zig_atomicrmw_max(obj, arg, order) __atomic_fetch_max (obj, arg, order)
-#define zig_atomic_store(obj, arg, order) __atomic_store_n (obj, arg, order)
-#define zig_atomic_load(obj, order) __atomic_load_n (obj, order)
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail)
+#define zig_atomicrmw_xchg(obj, arg, order, type) __atomic_exchange_n(obj, arg, order)
+#define zig_atomicrmw_add(obj, arg, order, type) __atomic_fetch_add (obj, arg, order)
+#define zig_atomicrmw_sub(obj, arg, order, type) __atomic_fetch_sub (obj, arg, order)
+#define zig_atomicrmw_or(obj, arg, order, type) __atomic_fetch_or (obj, arg, order)
+#define zig_atomicrmw_xor(obj, arg, order, type) __atomic_fetch_xor (obj, arg, order)
+#define zig_atomicrmw_and(obj, arg, order, type) __atomic_fetch_and (obj, arg, order)
+#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand(obj, arg, order)
+#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order)
+#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order)
+#define zig_atomic_store(obj, arg, order, type) __atomic_store_n (obj, arg, order)
+#define zig_atomic_load(obj, order, type) __atomic_load_n (obj, order)
#define zig_fence(order) __atomic_thread_fence(order)
+#elif _MSC_VER && (_M_IX86 || _M_X64)
+#define memory_order_relaxed 0
+#define memory_order_consume 1
+#define memory_order_acquire 2
+#define memory_order_release 3
+#define memory_order_acq_rel 4
+#define memory_order_seq_cst 5
+#define zig_atomic(type) type
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_expand_concat(zig_msvc_cmpxchg_, type)(obj, &(expected), desired)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_cmpxchg_strong(obj, expected, desired, succ, fail, type)
+#define zig_atomicrmw_xchg(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xchg_, type)(obj, arg)
+#define zig_atomicrmw_add(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_add_, type)(obj, arg)
+#define zig_atomicrmw_sub(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_sub_, type)(obj, arg)
+#define zig_atomicrmw_or(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_or_, type)(obj, arg)
+#define zig_atomicrmw_xor(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xor_, type)(obj, arg)
+#define zig_atomicrmw_and(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_and_, type)(obj, arg)
+#define zig_atomicrmw_nand(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_nand_, type)(obj, arg)
+#define zig_atomicrmw_min(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_min_, type)(obj, arg)
+#define zig_atomicrmw_max(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_max_, type)(obj, arg)
+#define zig_atomic_store(obj, arg, order, type) zig_expand_concat(zig_msvc_atomic_store_, type)(obj, arg)
+#define zig_atomic_load(obj, order, type) zig_expand_concat(zig_msvc_atomic_load_, type)(obj)
+#define zig_fence(order) __faststorefence()
+// TODO: _MSC_VER && (_M_ARM || _M_ARM64)
#else
#define memory_order_relaxed 0
#define memory_order_consume 1
@@ -183,19 +224,19 @@ typedef char bool;
#define memory_order_acq_rel 4
#define memory_order_seq_cst 5
#define zig_atomic(type) type
-#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) zig_unimplemented()
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) zig_unimplemented()
-#define zig_atomicrmw_xchg(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_add(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_sub(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_or(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_xor(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_and(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_nand(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_min(obj, arg, order) zig_unimplemented()
-#define zig_atomicrmw_max(obj, arg, order) zig_unimplemented()
-#define zig_atomic_store(obj, arg, order) zig_unimplemented()
-#define zig_atomic_load(obj, order) zig_unimplemented()
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_unimplemented()
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_unimplemented()
+#define zig_atomicrmw_xchg(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_add(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_sub(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_or(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_xor(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_and(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_nand(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_min(obj, arg, order, type) zig_unimplemented()
+#define zig_atomicrmw_max(obj, arg, order, type) zig_unimplemented()
+#define zig_atomic_store(obj, arg, order, type) zig_unimplemented()
+#define zig_atomic_load(obj, order, type) zig_unimplemented()
#define zig_fence(order) zig_unimplemented()
#endif
@@ -209,9 +250,6 @@ typedef char bool;
#define zig_noreturn void
#endif
-#define zig_concat(lhs, rhs) lhs##rhs
-#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs)
-
#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
typedef uintptr_t zig_usize;
@@ -374,7 +412,7 @@ zig_int_helpers(32)
zig_int_helpers(64)
static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_u32 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
@@ -393,7 +431,7 @@ static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n,
zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_i32 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
@@ -412,7 +450,7 @@ static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n,
}
static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_u64 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
@@ -431,7 +469,7 @@ static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n,
zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_i64 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
@@ -450,7 +488,7 @@ static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n,
}
static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_u8 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
@@ -470,7 +508,7 @@ static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n,
}
static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_i8 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
@@ -490,7 +528,7 @@ static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n,
}
static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_u16 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
@@ -510,7 +548,7 @@ static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n,
}
static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
-#if zig_has_builtin(add_overflow)
+#if zig_has_builtin(add_overflow) || defined(__GNUC__)
zig_i16 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
@@ -530,7 +568,7 @@ static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n,
}
static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_u32 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
@@ -549,7 +587,7 @@ static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n,
zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_i32 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
@@ -568,7 +606,7 @@ static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n,
}
static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_u64 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
@@ -587,7 +625,7 @@ static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n,
zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_i64 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
@@ -606,7 +644,7 @@ static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n,
}
static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_u8 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
@@ -626,7 +664,7 @@ static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n,
}
static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_i8 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
@@ -647,7 +685,7 @@ static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n,
static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_u16 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
@@ -668,7 +706,7 @@ static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n,
static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
-#if zig_has_builtin(sub_overflow)
+#if zig_has_builtin(sub_overflow) || defined(__GNUC__)
zig_i16 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
@@ -688,7 +726,7 @@ static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n,
}
static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_u32 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
@@ -707,7 +745,7 @@ static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n,
zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_i32 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
@@ -726,7 +764,7 @@ static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n,
}
static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_u64 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
@@ -745,7 +783,7 @@ static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n,
zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_i64 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
@@ -764,7 +802,7 @@ static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n,
}
static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_u8 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
@@ -784,7 +822,7 @@ static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n,
}
static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_i8 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
@@ -804,7 +842,7 @@ static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n,
}
static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_u16 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
@@ -824,7 +862,7 @@ static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n,
}
static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
-#if zig_has_builtin(mul_overflow)
+#if zig_has_builtin(mul_overflow) || defined(__GNUC__)
zig_i16 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
@@ -939,7 +977,7 @@ static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) {
static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) {
zig_u16 full_res;
-#if zig_has_builtin(bswap16)
+#if zig_has_builtin(bswap16) || defined(__GNUC__)
full_res = __builtin_bswap16(val);
#else
full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 |
@@ -954,7 +992,7 @@ static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) {
static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) {
zig_u32 full_res;
-#if zig_has_builtin(bswap32)
+#if zig_has_builtin(bswap32) || defined(__GNUC__)
full_res = __builtin_bswap32(val);
#else
full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 |
@@ -969,7 +1007,7 @@ static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) {
static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) {
zig_u64 full_res;
-#if zig_has_builtin(bswap64)
+#if zig_has_builtin(bswap64) || defined(__GNUC__)
full_res = __builtin_bswap64(val);
#else
full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 |
@@ -1049,7 +1087,7 @@ static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \
return zig_popcount_u##w((zig_u##w)val, bits); \
}
-#if zig_has_builtin(popcount)
+#if zig_has_builtin(popcount) || defined(__GNUC__)
#define zig_builtin_popcount(w) \
static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
(void)bits; \
@@ -1078,7 +1116,7 @@ zig_builtin_popcount(64)
static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \
return zig_ctz_u##w((zig_u##w)val, bits); \
}
-#if zig_has_builtin(ctz)
+#if zig_has_builtin(ctz) || defined(__GNUC__)
#define zig_builtin_ctz(w) \
static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
if (val == 0) return bits; \
@@ -1103,7 +1141,7 @@ zig_builtin_ctz(64)
static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \
return zig_clz_u##w((zig_u##w)val, bits); \
}
-#if zig_has_builtin(clz)
+#if zig_has_builtin(clz) || defined(__GNUC__)
#define zig_builtin_clz(w) \
static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
if (val == 0) return bits; \
@@ -1141,6 +1179,8 @@ typedef signed __int128 zig_i128;
#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo))
+#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
+#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
#define zig_hi_u128(val) ((zig_u64)((val) >> 64))
#define zig_lo_u128(val) ((zig_u64)((val) >> 0))
#define zig_hi_i128(val) ((zig_i64)((val) >> 64))
@@ -1168,6 +1208,8 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
+#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
#define zig_hi_i128(val) ((val).hi)
@@ -1289,51 +1331,79 @@ static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
}
static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+ if (rhs == zig_as_u8(0)) return lhs;
+ if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) };
+ return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs };
}
static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
+ if (rhs == zig_as_u8(0)) return lhs;
+ if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << rhs, .lo = zig_minInt_u64 };
return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
}
static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
- if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
+ if (rhs == zig_as_u8(0)) return lhs;
+ if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << rhs, .lo = zig_minInt_u64 };
return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
}
static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
zig_u128 res;
- res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 res;
- res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) {
zig_u128 res;
- res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 res;
- res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
-static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), (((lhs.hi ^ rhs.hi) & zig_rem_i128(lhs, rhs).hi) < zig_as_i64(0)) ? zig_as_i128(0, 1) : zig_as_i128(0, 0));
+zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs);
+static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) {
+ return __multi3(lhs, rhs);
+}
+
+zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs);
+static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) {
+ return __udivti3(lhs, rhs);
+};
+
+zig_extern zig_i128 __divti3(zig_i128 lhs, zig_i128 rhs);
+static zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) {
+ return __divti3(lhs, rhs);
+};
+
+zig_extern zig_u128 __umodti3(zig_u128 lhs, zig_u128 rhs);
+static zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) {
+ return __umodti3(lhs, rhs);
+}
+
+zig_extern zig_i128 __modti3(zig_i128 lhs, zig_i128 rhs);
+static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
+ return __modti3(lhs, rhs);
}
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return rem + (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0));
+ return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0)));
+}
+
+static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
+ return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0)));
}
#endif /* zig_has_int128 */
@@ -1341,6 +1411,10 @@ static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
#define zig_div_floor_u128 zig_div_trunc_u128
#define zig_mod_u128 zig_rem_u128
+static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) {
+ return zig_not_u128(zig_and_u128(lhs, rhs), 128);
+}
+
static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) {
return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
}
@@ -1358,7 +1432,7 @@ static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) {
}
static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) {
- zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? -zig_as_i128(0, 1) : zig_as_i128(0, 0);
+ zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0);
return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask);
}
@@ -1375,7 +1449,7 @@ static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
}
static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
- return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
+ return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits);
}
static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
@@ -1394,6 +1468,17 @@ static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
+#if _MSC_VER
+static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
+ zig_u64 lo_carry;
+ zig_u64 lo = _umul128(lhs.lo, rhs.lo, &lo_carry);
+ zig_u64 hi = lhs.hi * rhs.lo + lhs.lo * rhs.hi + lo_carry;
+ return zig_as_u128(hi, lo);
+}
+#else
+static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs); // TODO
+#endif
+
static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits);
}
@@ -1404,18 +1489,6 @@ static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
#if zig_has_int128
-static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
- *res = zig_shlw_u128(lhs, rhs, bits);
- return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
-}
-
-static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
- *res = zig_shlw_i128(lhs, rhs, bits);
- zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
- return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
-}
-
static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_u128 full_res;
@@ -1496,28 +1569,95 @@ static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_
#else /* zig_has_int128 */
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
- return zig_addo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) |
- zig_addo_u64(&res->hi, res->hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX));
+static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) {
+ return overflow ||
+ zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) ||
+ zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0);
+}
+
+static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) {
+ return overflow ||
+ zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) ||
+ zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
- return zig_subo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) |
- zig_subo_u64(&res->hi, res->hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX));
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 full_res;
+ bool overflow =
+ zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
+ zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
+ *res = zig_wrap_u128(full_res, bits);
+ return zig_overflow_u128(overflow, full_res, bits);
+}
+
+zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ zig_c_int overflow_int;
+ zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
+ *res = zig_wrap_i128(full_res, bits);
+ return zig_overflow_i128(overflow_int, full_res, bits);
+}
+
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 full_res;
+ bool overflow =
+ zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
+ zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
+ *res = zig_wrap_u128(full_res, bits);
+ return zig_overflow_u128(overflow, full_res, bits);
+}
+
+zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ zig_c_int overflow_int;
+ zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
+ *res = zig_wrap_i128(full_res, bits);
+ return zig_overflow_i128(overflow_int, full_res, bits);
+}
+
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ *res = zig_mulw_u128(lhs, rhs, bits);
+ return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) &&
+ zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+}
+
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ zig_c_int overflow_int;
+ zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
+ *res = zig_wrap_i128(full_res, bits);
+ return zig_overflow_i128(overflow_int, full_res, bits);
}
#endif /* zig_has_int128 */
+static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+ *res = zig_shlw_u128(lhs, rhs, bits);
+ return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+}
+
+static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+ *res = zig_shlw_i128(lhs, rhs, bits);
+ zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
+ return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
+ zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
+}
+
static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
zig_u128 res;
if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0))
return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs;
+
+#if zig_has_int128
return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res;
+#else
+ return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res;
+#endif
}
static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
zig_i128 res;
- if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, rhs, bits)) return res;
+ if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res;
return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
}
@@ -1555,8 +1695,9 @@ static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
}
static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) {
+ if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits);
if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64));
- return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + zig_as_u8(64);
+ return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64));
}
static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) {
@@ -1593,7 +1734,7 @@ static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) {
}
static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) {
- return zig_byte_swap_u128(zig_bitcast_u128(val), bits);
+ return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits));
}
static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
@@ -1603,15 +1744,57 @@ static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
}
static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
- return zig_bit_reverse_u128(zig_bitcast_u128(val), bits);
+ return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
/* ========================= Floating Point Support ========================= */
+#if _MSC_VER
+#define zig_msvc_flt_inf ((double)(1e+300 * 1e+300))
+#define zig_msvc_flt_inff ((float)(1e+300 * 1e+300))
+#define zig_msvc_flt_infl ((long double)(1e+300 * 1e+300))
+#define zig_msvc_flt_nan ((double)(zig_msvc_flt_inf * 0.f))
+#define zig_msvc_flt_nanf ((float)(zig_msvc_flt_inf * 0.f))
+#define zig_msvc_flt_nanl ((long double)(zig_msvc_flt_inf * 0.f))
+#define __builtin_nan(str) nan(str)
+#define __builtin_nanf(str) nanf(str)
+#define __builtin_nanl(str) nanl(str)
+#define __builtin_inf() zig_msvc_flt_inf
+#define __builtin_inff() zig_msvc_flt_inff
+#define __builtin_infl() zig_msvc_flt_infl
+#endif
+
+
+#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(__GNUC__)
+#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
+#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
+#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
+#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
+#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
+#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg)
+#else
+#define zig_float_from_repr(Type, ReprType) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \
+ return *((zig_##Type*)&repr); \
+ }
+zig_float_from_repr(f16, u16)
+zig_float_from_repr(f32, u32)
+zig_float_from_repr(f64, u64)
+zig_float_from_repr(f80, u128)
+zig_float_from_repr(f128, u128)
+zig_float_from_repr(c_longdouble, u128)
+#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
+#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
+#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
+#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
+#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
+#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
+#endif
+
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
#define zig_libc_name_f16(name) __##name##h
-#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
+#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
#define zig_as_f16(fp, repr) fp##f
@@ -1622,7 +1805,7 @@ typedef double zig_f16;
#define zig_bitSizeOf_c_longdouble 16
typedef long double zig_f16;
#define zig_as_f16(fp, repr) fp##l
-#elif FLT16_MANT_DIG == 11 && zig_has_builtin(inff16)
+#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(__GNUC__))
typedef _Float16 zig_f16;
#define zig_as_f16(fp, repr) fp##f16
#elif defined(__SIZEOF_FP16__)
@@ -1636,12 +1819,18 @@ typedef zig_i16 zig_f16;
#define zig_as_f16(fp, repr) repr
#undef zig_as_special_f16
#define zig_as_special_f16(sign, name, arg, repr) repr
+#undef zig_as_special_constant_f16
+#define zig_as_special_constant_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
#define zig_libc_name_f32(name) name##f
-#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
+#if _MSC_VER
+#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, )
+#else
+#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr)
+#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
#define zig_as_f32(fp, repr) fp##f
@@ -1663,12 +1852,18 @@ typedef zig_i32 zig_f32;
#define zig_as_f32(fp, repr) repr
#undef zig_as_special_f32
#define zig_as_special_f32(sign, name, arg, repr) repr
+#undef zig_as_special_constant_f32
+#define zig_as_special_constant_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
#define zig_libc_name_f64(name) name
-#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
+#if _MSC_VER
+#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, )
+#else
+#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr)
+#endif
#if FLT_MANT_DIG == 53
typedef float zig_f64;
#define zig_as_f64(fp, repr) fp##f
@@ -1693,12 +1888,14 @@ typedef zig_i64 zig_f64;
#define zig_as_f64(fp, repr) repr
#undef zig_as_special_f64
#define zig_as_special_f64(sign, name, arg, repr) repr
+#undef zig_as_special_constant_f64
+#define zig_as_special_constant_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
#define zig_libc_name_f80(name) __##name##x
-#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
+#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
#define zig_as_f80(fp, repr) fp##f
@@ -1726,12 +1923,14 @@ typedef zig_i128 zig_f80;
#define zig_as_f80(fp, repr) repr
#undef zig_as_special_f80
#define zig_as_special_f80(sign, name, arg, repr) repr
+#undef zig_as_special_constant_f80
+#define zig_as_special_constant_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
#define zig_libc_name_f128(name) name##q
-#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
+#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
#define zig_as_f128(fp, repr) fp##f
@@ -1761,13 +1960,43 @@ typedef zig_i128 zig_f128;
#define zig_as_f128(fp, repr) repr
#undef zig_as_special_f128
#define zig_as_special_f128(sign, name, arg, repr) repr
+#undef zig_as_special_constant_f128
+#define zig_as_special_constant_f128(sign, name, arg, repr) repr
#endif
#define zig_has_c_longdouble 1
+#define zig_libc_name_c_longdouble(name) name##l
+#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr)
+#ifdef zig_bitSizeOf_c_longdouble
typedef long double zig_c_longdouble;
#define zig_as_c_longdouble(fp, repr) fp##l
-#define zig_libc_name_c_longdouble(name) name##l
-#define zig_as_special_c_longdouble(sign, name, arg, repr) sign __builtin_##name##l(arg)
+#else
+#undef zig_has_c_longdouble
+#define zig_bitSizeOf_c_longdouble 80
+#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
+#define zig_has_c_longdouble 0
+#define zig_repr_c_longdouble i128
+typedef zig_i128 zig_c_longdouble;
+#define zig_as_c_longdouble(fp, repr) repr
+#undef zig_as_special_c_longdouble
+#define zig_as_special_c_longdouble(sign, name, arg, repr) repr
+#undef zig_as_special_constant_c_longdouble
+#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr
+#endif
+
+#define zig_cast_f16 (zig_f16)
+#define zig_cast_f32 (zig_f32)
+#define zig_cast_f64 (zig_f64)
+
+#if _MSC_VER && !zig_has_f128
+#define zig_cast_f80
+#define zig_cast_c_longdouble
+#define zig_cast_f128
+#else
+#define zig_cast_f80 (zig_f80)
+#define zig_cast_c_longdouble (zig_c_longdouble)
+#define zig_cast_f128 (zig_f128)
+#endif
#define zig_convert_builtin(ResType, operation, ArgType, version) \
zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
@@ -1892,3 +2121,268 @@ zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
zig_float_builtins(c_longdouble)
+
+#if _MSC_VER && (_M_IX86 || _M_X64)
+
+// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64
+
+#define zig_msvc_atomics(Type, suffix) \
+ static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
+ zig_##Type comparand = *expected; \
+ zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
+ bool exchanged = initial == comparand; \
+ if (!exchanged) { \
+ *expected = initial; \
+ } \
+ return exchanged; \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ return _InterlockedExchange##suffix(obj, value); \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ return _InterlockedExchangeAdd##suffix(obj, value); \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##Type new; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ new = prev - value; \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ } \
+ return prev; \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ return _InterlockedOr##suffix(obj, value); \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ return _InterlockedXor##suffix(obj, value); \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ return _InterlockedAnd##suffix(obj, value); \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##Type new; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ new = ~(prev & value); \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ } \
+ return prev; \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##Type new; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ new = value < prev ? value : prev; \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ } \
+ return prev; \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##Type new; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ new = value > prev ? value : prev; \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ } \
+ return prev; \
+ } \
+ static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ _InterlockedExchange##suffix(obj, value); \
+ } \
+ static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \
+ return _InterlockedOr##suffix(obj, 0); \
+ }
+
+zig_msvc_atomics(u8, 8)
+zig_msvc_atomics(i8, 8)
+zig_msvc_atomics(u16, 16)
+zig_msvc_atomics(i16, 16)
+zig_msvc_atomics(u32, )
+zig_msvc_atomics(i32, )
+zig_msvc_atomics(u64, 64)
+zig_msvc_atomics(i64, 64)
+
+#define zig_msvc_flt_atomics(Type, ReprType, suffix) \
+ static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
+ zig_##ReprType comparand = *((zig_##ReprType*)expected); \
+ zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \
+ bool exchanged = initial == comparand; \
+ if (!exchanged) { \
+ *expected = *((zig_##Type*)&initial); \
+ } \
+ return exchanged; \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \
+ return *((zig_##Type*)&initial); \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##ReprType new; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ new = prev + value; \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
+ } \
+ return prev; \
+ } \
+ static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##ReprType new; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ new = prev - value; \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
+ } \
+ return prev; \
+ }
+
+zig_msvc_flt_atomics(f32, u32, )
+zig_msvc_flt_atomics(f64, u64, 64)
+
+#if _M_IX86
+static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) {
+ return _InterlockedExchangePointer(obj, arg);
+}
+
+static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) {
+ _InterlockedExchangePointer(obj, arg);
+}
+
+static inline void* zig_msvc_atomic_load_p32(void** obj, zig_u32* arg) {
+ return (void*)_InterlockedOr((void*)obj, 0);
+}
+
+static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desired) {
+ void* comparand = *expected;
+ void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand);
+ bool exchanged = initial == comparand;
+ if (!exchanged) {
+ *expected = initial;
+ }
+ return exchanged;
+}
+#else
+static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) {
+ return _InterlockedExchangePointer(obj, arg);
+}
+
+static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) {
+ _InterlockedExchangePointer(obj, arg);
+}
+
+static inline void* zig_msvc_atomic_load_p64(void** obj) {
+ return (void*)_InterlockedOr64((void*)obj, 0);
+}
+
+static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desired) {
+ void* comparand = *expected;
+ void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand);
+ bool exchanged = initial == comparand;
+ if (!exchanged) {
+ *expected = initial;
+ }
+ return exchanged;
+}
+#endif
+
+static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) {
+ return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected);
+}
+
+static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) {
+ return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected);
+}
+
+#define zig_msvc_atomics_128xchg(Type) \
+ static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, value); \
+ } \
+ return prev; \
+ }
+
+zig_msvc_atomics_128xchg(u128)
+zig_msvc_atomics_128xchg(i128)
+
+#define zig_msvc_atomics_128op(Type, operation) \
+ static inline zig_##Type zig_msvc_atomicrmw_##operation##_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ bool success = false; \
+ zig_##Type new; \
+ zig_##Type prev; \
+ while (!success) { \
+ prev = *obj; \
+ new = zig_##operation##_##Type(prev, value); \
+ success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ } \
+ return prev; \
+ }
+
+zig_msvc_atomics_128op(u128, add)
+zig_msvc_atomics_128op(u128, sub)
+zig_msvc_atomics_128op(u128, or)
+zig_msvc_atomics_128op(u128, xor)
+zig_msvc_atomics_128op(u128, and)
+zig_msvc_atomics_128op(u128, nand)
+zig_msvc_atomics_128op(u128, min)
+zig_msvc_atomics_128op(u128, max)
+
+#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
+
+/* ========================= Special Case Intrinsics ========================= */
+
+#if (_MSC_VER && _M_X64) || defined(__x86_64__)
+
+static inline void* zig_x86_64_windows_teb(void) {
+#if _MSC_VER
+ return __readgsqword(0x30);
+#else
+ void* teb;
+ __asm volatile(" movq %%gs:0x30, %[ptr]": [ptr]"=r"(teb)::);
+ return teb;
+#endif
+}
+
+#endif
+
+#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__)
+
+static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) {
+ zig_u32 cpu_info[4];
+#if _MSC_VER
+ __cpuidex(cpu_info, leaf_id, subid);
+#else
+ __cpuid_count(leaf_id, subid, cpu_info[0], cpu_info[1], cpu_info[2], cpu_info[3]);
+#endif
+ *eax = cpu_info[0];
+ *ebx = cpu_info[1];
+ *ecx = cpu_info[2];
+ *edx = cpu_info[3];
+}
+
+static inline zig_u32 zig_x86_get_xcr0(void) {
+#if _MSC_VER
+ return (zig_u32)_xgetbv(0);
+#else
+ zig_u32 eax;
+ zig_u32 edx;
+ __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
+ return eax;
+#endif
+}
+
+#endif
diff --git a/src/Air.zig b/src/Air.zig
index 3bcbdb8e98..3ebdd319de 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -31,7 +31,7 @@ pub const Inst = struct {
/// The first N instructions in the main block must be one arg instruction per
/// function parameter. This makes function parameters participate in
/// liveness analysis without any special handling.
- /// Uses the `ty` field.
+ /// Uses the `arg` field.
arg,
/// Float or integer addition. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
@@ -737,6 +737,23 @@ pub const Inst = struct {
/// Uses the `ty_pl` field.
save_err_return_trace_index,
+ /// Store an element to a vector pointer at an index.
+ /// Uses the `vector_store_elem` field.
+ vector_store_elem,
+
+ /// Implements @cVaArg builtin.
+ /// Uses the `ty_op` field.
+ c_va_arg,
+ /// Implements @cVaCopy builtin.
+ /// Uses the `ty_op` field.
+ c_va_copy,
+ /// Implements @cVaEnd builtin.
+ /// Uses the `un_op` field.
+ c_va_end,
+ /// Implements @cVaStart builtin.
+ /// Uses the `ty` field.
+ c_va_start,
+
pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag {
switch (op) {
.lt => return if (optimized) .cmp_lt_optimized else .cmp_lt,
@@ -778,6 +795,10 @@ pub const Inst = struct {
rhs: Ref,
},
ty: Type,
+ arg: struct {
+ ty: Ref,
+ src_index: u32,
+ },
ty_op: struct {
ty: Ref,
operand: Ref,
@@ -814,6 +835,11 @@ pub const Inst = struct {
operand: Ref,
operation: std.builtin.ReduceOp,
},
+ vector_store_elem: struct {
+ vector_ptr: Ref,
+ // Index into a different array.
+ payload: u32,
+ },
// Make sure we don't accidentally add a field to make this union
// bigger than expected. Note that in Debug builds, Zig is allowed
@@ -1081,10 +1107,12 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.alloc,
.ret_ptr,
- .arg,
.err_return_trace,
+ .c_va_start,
=> return datas[inst].ty,
+ .arg => return air.getRefType(datas[inst].arg.ty),
+
.assembly,
.block,
.constant,
@@ -1147,6 +1175,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.byte_swap,
.bit_reverse,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> return air.getRefType(datas[inst].ty_op.ty),
.loop,
@@ -1177,6 +1207,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.set_union_tag,
.prefetch,
.set_err_return_trace,
+ .vector_store_elem,
+ .c_va_end,
=> return Type.void,
.ptrtoint,
@@ -1250,7 +1282,7 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => air.extra[i],
Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]),
i32 => @bitCast(i32, air.extra[i]),
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 8df3d56a88..91dea526dc 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -42,6 +42,7 @@ string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.d
compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
/// The topmost block of the current function.
fn_block: ?*GenZir = null,
+fn_var_args: bool = false,
/// Maps string table indexes to the first `@import` ZIR instruction
/// that uses this string as the operand.
imports: std.AutoArrayHashMapUnmanaged(u32, Ast.TokenIndex) = .{},
@@ -79,7 +80,7 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
const fields = std.meta.fields(@TypeOf(extra));
var i = index;
inline for (fields) |field| {
- astgen.extra.items[i] = switch (field.field_type) {
+ astgen.extra.items[i] = switch (field.type) {
u32 => @field(extra, field.name),
Zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
@@ -2504,7 +2505,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.err_union_code,
.err_union_code_ptr,
.ptr_type,
- .overflow_arithmetic_ptr,
.enum_literal,
.merge_error_sets,
.error_union_type,
@@ -2542,7 +2542,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.type_info,
.size_of,
.bit_size_of,
- .log2_int_type,
.typeof_log2_int_type,
.ptr_to_int,
.align_of,
@@ -3366,7 +3365,7 @@ fn ptrType(
var trailing_count: u32 = 0;
if (ptr_info.ast.sentinel != 0) {
- sentinel_ref = try expr(gz, scope, .{ .rl = .{ .ty = elem_type } }, ptr_info.ast.sentinel);
+ sentinel_ref = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, ptr_info.ast.sentinel);
trailing_count += 1;
}
if (ptr_info.ast.align_node != 0) {
@@ -3469,7 +3468,7 @@ fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.
}
const len = try reachableExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node, node);
const elem_type = try typeExpr(gz, scope, extra.elem_type);
- const sentinel = try reachableExpr(gz, scope, .{ .rl = .{ .coerced_ty = elem_type } }, extra.sentinel, node);
+ const sentinel = try reachableExprComptime(gz, scope, .{ .rl = .{ .coerced_ty = elem_type } }, extra.sentinel, node, true);
const result = try gz.addPlNode(.array_type_sentinel, node, Zir.Inst.ArrayTypeSentinel{
.len = len,
@@ -3892,10 +3891,6 @@ fn fnDecl(
.noalias_bits = noalias_bits,
});
} else func: {
- if (is_var_args) {
- return astgen.failTok(fn_proto.ast.fn_token, "non-extern function is variadic", .{});
- }
-
// as a scope, fn_gz encloses ret_gz, but for instruction list, fn_gz stacks on ret_gz
fn_gz.instructions_top = ret_gz.instructions.items.len;
@@ -3903,6 +3898,10 @@ fn fnDecl(
astgen.fn_block = &fn_gz;
defer astgen.fn_block = prev_fn_block;
+ const prev_var_args = astgen.fn_var_args;
+ astgen.fn_var_args = is_var_args;
+ defer astgen.fn_var_args = prev_var_args;
+
astgen.advanceSourceCursorToNode(body_node);
const lbrace_line = astgen.source_line - decl_gz.decl_line;
const lbrace_column = astgen.source_column;
@@ -6071,7 +6070,7 @@ fn whileExpr(
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err;
break :c .{
.inst = err_union,
- .bool_bit = try cond_scope.addUnNode(tag, err_union, while_full.ast.then_expr),
+ .bool_bit = try cond_scope.addUnNode(tag, err_union, while_full.ast.cond_expr),
};
} else if (while_full.payload_token) |_| {
const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none };
@@ -6079,7 +6078,7 @@ fn whileExpr(
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null;
break :c .{
.inst = optional,
- .bool_bit = try cond_scope.addUnNode(tag, optional, while_full.ast.then_expr),
+ .bool_bit = try cond_scope.addUnNode(tag, optional, while_full.ast.cond_expr),
};
} else {
const cond = try expr(&cond_scope, &cond_scope.base, bool_ri, while_full.ast.cond_expr);
@@ -6943,7 +6942,13 @@ fn switchExpr(
// it as the break operand.
if (body_len < 2)
break :blk;
- const store_inst = payloads.items[end_index - 2];
+
+ var store_index = end_index - 2;
+ while (true) : (store_index -= 1) switch (zir_tags[payloads.items[store_index]]) {
+ .dbg_block_end, .dbg_block_begin, .dbg_stmt, .dbg_var_val, .dbg_var_ptr => {},
+ else => break,
+ };
+ const store_inst = payloads.items[store_index];
if (zir_tags[store_inst] != .store_to_block_ptr or
zir_datas[store_inst].bin.lhs != block_scope.rl_ptr)
break :blk;
@@ -8235,21 +8240,7 @@ fn builtinCall(
.add_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .add_with_overflow),
.sub_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .sub_with_overflow),
.mul_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .mul_with_overflow),
- .shl_with_overflow => {
- const int_type = try typeExpr(gz, scope, params[0]);
- const log2_int_type = try gz.addUnNode(.log2_int_type, int_type, params[0]);
- const ptr_type = try gz.addUnNode(.overflow_arithmetic_ptr, int_type, params[0]);
- const lhs = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[1]);
- const rhs = try expr(gz, scope, .{ .rl = .{ .ty = log2_int_type } }, params[2]);
- const ptr = try expr(gz, scope, .{ .rl = .{ .ty = ptr_type } }, params[3]);
- const result = try gz.addExtendedPayload(.shl_with_overflow, Zir.Inst.OverflowArithmetic{
- .node = gz.nodeIndexToRelative(node),
- .lhs = lhs,
- .rhs = rhs,
- .ptr = ptr,
- });
- return rvalue(gz, ri, result, node);
- },
+ .shl_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .shl_with_overflow),
.atomic_load => {
const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.AtomicLoad{
@@ -8384,6 +8375,46 @@ fn builtinCall(
});
return rvalue(gz, ri, result, node);
},
+ .c_va_arg => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaArg' outside function scope", .{});
+ }
+ const result = try gz.addExtendedPayload(.c_va_arg, Zir.Inst.BinNode{
+ .node = gz.nodeIndexToRelative(node),
+ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
+ .rhs = try typeExpr(gz, scope, params[1]),
+ });
+ return rvalue(gz, ri, result, node);
+ },
+ .c_va_copy => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaCopy' outside function scope", .{});
+ }
+ const result = try gz.addExtendedPayload(.c_va_copy, Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = try expr(gz, scope, .{ .rl = .none }, params[0]),
+ });
+ return rvalue(gz, ri, result, node);
+ },
+ .c_va_end => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaEnd' outside function scope", .{});
+ }
+ const result = try gz.addExtendedPayload(.c_va_end, Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = try expr(gz, scope, .{ .rl = .none }, params[0]),
+ });
+ return rvalue(gz, ri, result, node);
+ },
+ .c_va_start => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaStart' outside function scope", .{});
+ }
+ if (!astgen.fn_var_args) {
+ return astgen.failNode(node, "'@cVaStart' in a non-variadic function", .{});
+ }
+ return rvalue(gz, ri, try gz.addNodeExtended(.c_va_start, node), node);
+ },
}
}
@@ -8650,16 +8681,12 @@ fn overflowArithmetic(
params: []const Ast.Node.Index,
tag: Zir.Inst.Extended,
) InnerError!Zir.Inst.Ref {
- const int_type = try typeExpr(gz, scope, params[0]);
- const ptr_type = try gz.addUnNode(.overflow_arithmetic_ptr, int_type, params[0]);
- const lhs = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[1]);
- const rhs = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]);
- const ptr = try expr(gz, scope, .{ .rl = .{ .ty = ptr_type } }, params[3]);
- const result = try gz.addExtendedPayload(tag, Zir.Inst.OverflowArithmetic{
+ const lhs = try expr(gz, scope, .{ .rl = .none }, params[0]);
+ const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]);
+ const result = try gz.addExtendedPayload(tag, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
.lhs = lhs,
.rhs = rhs,
- .ptr = ptr,
});
return rvalue(gz, ri, result, node);
}
@@ -12129,7 +12156,7 @@ const GenZir = struct {
const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
try gz.astgen.instructions.append(gpa, .{ .tag = .dbg_block_end, .data = undefined });
- try gz.instructions.insert(gpa, gz.instructions.items.len - 1, new_index);
+ try gz.instructions.append(gpa, new_index);
}
};
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 55ff048482..6f7e15c4f6 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -637,9 +637,9 @@ const DocData = struct {
inline for (comptime std.meta.fields(Type)) |case| {
if (@field(Type, case.name) == active_tag) {
const current_value = @field(self, case.name);
- inline for (comptime std.meta.fields(case.field_type)) |f| {
+ inline for (comptime std.meta.fields(case.type)) |f| {
try jsw.arrayElem();
- if (f.field_type == std.builtin.Type.Pointer.Size) {
+ if (f.type == std.builtin.Type.Pointer.Size) {
try jsw.emitNumber(@enumToInt(@field(current_value, f.name)));
} else {
try std.json.stringify(@field(current_value, f.name), opts, w);
@@ -1510,26 +1510,6 @@ fn walkInstruction(
// return operand;
// },
- .overflow_arithmetic_ptr => {
- const un_node = data[inst_index].un_node;
-
- const elem_type_ref = try self.walkRef(file, parent_scope, parent_src, un_node.operand, false);
- const type_slot_index = self.types.items.len;
- try self.types.append(self.arena, .{
- .Pointer = .{
- .size = .One,
- .child = elem_type_ref.expr,
- .is_mutable = true,
- .is_volatile = false,
- .is_allowzero = false,
- },
- });
-
- return DocData.WalkResult{
- .typeRef = .{ .type = @enumToInt(Ref.type_type) },
- .expr = .{ .type = type_slot_index },
- };
- },
.ptr_type => {
const ptr = data[inst_index].ptr_type;
const extra = file.zir.extraData(Zir.Inst.PtrType, ptr.payload_index);
@@ -3818,7 +3798,7 @@ fn analyzeFancyFunction(
file,
scope,
parent_src,
- fn_info.body[fn_info.body.len - 1],
+ fn_info.body[0],
);
} else {
break :blk null;
@@ -3956,7 +3936,7 @@ fn analyzeFunction(
file,
scope,
parent_src,
- fn_info.body[fn_info.body.len - 1],
+ fn_info.body[0],
);
} else {
break :blk null;
@@ -3997,11 +3977,25 @@ fn getGenericReturnType(
file: *File,
scope: *Scope,
parent_src: SrcLocInfo, // function decl line
- body_end: usize,
+ body_main_block: usize,
) !DocData.Expr {
- // TODO: compute the correct line offset
- const wr = try self.walkInstruction(file, scope, parent_src, body_end - 3, false);
- return wr.expr;
+ const tags = file.zir.instructions.items(.tag);
+ const data = file.zir.instructions.items(.data);
+
+ // We expect `body_main_block` to be the first instruction
+ // inside the function body, and for it to be a block instruction.
+ const pl_node = data[body_main_block].pl_node;
+ const extra = file.zir.extraData(Zir.Inst.Block, pl_node.payload_index);
+ const maybe_ret_node = file.zir.extra[extra.end..][extra.data.body_len - 4];
+ switch (tags[maybe_ret_node]) {
+ .ret_node, .ret_load => {
+ const wr = try self.walkInstruction(file, scope, parent_src, maybe_ret_node, false);
+ return wr.expr;
+ },
+ else => {
+ return DocData.Expr{ .comptimeExpr = 0 };
+ },
+ }
}
fn collectUnionFieldInfo(
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index 24625dc10a..b71d96c3dd 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -30,6 +30,10 @@ pub const Tag = enum {
compile_log,
ctz,
c_undef,
+ c_va_arg,
+ c_va_copy,
+ c_va_end,
+ c_va_start,
div_exact,
div_floor,
div_trunc,
@@ -150,7 +154,7 @@ pub const list = list: {
"@addWithOverflow",
.{
.tag = .add_with_overflow,
- .param_count = 4,
+ .param_count = 2,
},
},
.{
@@ -355,6 +359,30 @@ pub const list = list: {
},
},
.{
+ "@cVaArg", .{
+ .tag = .c_va_arg,
+ .param_count = 2,
+ },
+ },
+ .{
+ "@cVaCopy", .{
+ .tag = .c_va_copy,
+ .param_count = 1,
+ },
+ },
+ .{
+ "@cVaEnd", .{
+ .tag = .c_va_end,
+ .param_count = 1,
+ },
+ },
+ .{
+ "@cVaStart", .{
+ .tag = .c_va_start,
+ .param_count = 0,
+ },
+ },
+ .{
"@divExact",
.{
.tag = .div_exact,
@@ -608,7 +636,7 @@ pub const list = list: {
"@mulWithOverflow",
.{
.tag = .mul_with_overflow,
- .param_count = 4,
+ .param_count = 2,
},
},
.{
@@ -713,7 +741,7 @@ pub const list = list: {
"@shlWithOverflow",
.{
.tag = .shl_with_overflow,
- .param_count = 4,
+ .param_count = 2,
},
},
.{
@@ -861,7 +889,7 @@ pub const list = list: {
"@subWithOverflow",
.{
.tag = .sub_with_overflow,
- .param_count = 4,
+ .param_count = 2,
},
},
.{
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 1d0997d20c..3aa9663ed5 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -7,9 +7,6 @@ const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.compilation);
const Target = std.Target;
-const ArrayList = std.ArrayList;
-const Sha256 = std.crypto.hash.sha2.Sha256;
-const fs = std.fs;
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
@@ -181,10 +178,6 @@ emit_docs: ?EmitLoc,
work_queue_wait_group: WaitGroup = .{},
astgen_wait_group: WaitGroup = .{},
-/// Exported symbol names. This is only for when the target is wasm.
-/// TODO: Remove this when Stage2 becomes the default compiler as it will already have this information.
-export_symbol_names: std.ArrayListUnmanaged([]const u8) = .{},
-
pub const default_stack_protector_buffer_size = 4;
pub const SemaError = Module.SemaError;
@@ -572,7 +565,7 @@ pub const AllErrors = struct {
self.arena.promote(gpa).deinit();
}
- fn add(
+ pub fn add(
module: *Module,
arena: *std.heap.ArenaAllocator,
errors: *std.ArrayList(Message),
@@ -591,7 +584,17 @@ pub const AllErrors = struct {
Message.HashContext,
std.hash_map.default_max_load_percentage,
).init(allocator);
- const err_source = try module_err_msg.src_loc.file_scope.getSource(module.gpa);
+ const err_source = module_err_msg.src_loc.file_scope.getSource(module.gpa) catch |err| {
+ const file_path = try module_err_msg.src_loc.file_scope.fullPath(allocator);
+ try errors.append(.{
+ .plain = .{
+ .msg = try std.fmt.allocPrint(allocator, "unable to load '{s}': {s}", .{
+ file_path, @errorName(err),
+ }),
+ },
+ });
+ return;
+ };
const err_span = try module_err_msg.src_loc.span(module.gpa);
const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main);
@@ -954,6 +957,7 @@ pub const InitOptions = struct {
linker_allow_shlib_undefined: ?bool = null,
linker_bind_global_refs_locally: ?bool = null,
linker_import_memory: ?bool = null,
+ linker_import_symbols: bool = false,
linker_import_table: bool = false,
linker_export_table: bool = false,
linker_initial_memory: ?u64 = null,
@@ -964,6 +968,7 @@ pub const InitOptions = struct {
linker_print_gc_sections: bool = false,
linker_print_icf_sections: bool = false,
linker_print_map: bool = false,
+ linker_opt_bisect_limit: i32 = -1,
each_lib_rpath: ?bool = null,
build_id: ?bool = null,
disable_c_depfile: bool = false,
@@ -1464,7 +1469,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.gpa = gpa,
.manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
cache.addPrefix(options.zig_lib_directory);
cache.addPrefix(options.local_cache_directory);
errdefer cache.manifest_dir.close();
@@ -1811,6 +1816,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.bind_global_refs_locally = options.linker_bind_global_refs_locally orelse false,
.compress_debug_sections = options.linker_compress_debug_sections orelse .none,
.import_memory = options.linker_import_memory orelse false,
+ .import_symbols = options.linker_import_symbols,
.import_table = options.linker_import_table,
.export_table = options.linker_export_table,
.initial_memory = options.linker_initial_memory,
@@ -1821,6 +1827,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.print_gc_sections = options.linker_print_gc_sections,
.print_icf_sections = options.linker_print_icf_sections,
.print_map = options.linker_print_map,
+ .opt_bisect_limit = options.linker_opt_bisect_limit,
.z_nodelete = options.linker_z_nodelete,
.z_notext = options.linker_z_notext,
.z_defs = options.linker_z_defs,
@@ -2166,11 +2173,6 @@ pub fn destroy(self: *Compilation) void {
self.cache_parent.manifest_dir.close();
if (self.owned_link_dir) |*dir| dir.close();
- for (self.export_symbol_names.items) |symbol_name| {
- gpa.free(symbol_name);
- }
- self.export_symbol_names.deinit(gpa);
-
// This destroys `self`.
self.arena_state.promote(gpa).deinit();
}
@@ -3977,70 +3979,6 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
}
}
- // Windows has an argument length limit of 32,766 characters, macOS 262,144 and Linux
- // 2,097,152. If our args exceed 30 KiB, we instead write them to a "response file" and
- // pass that to zig, e.g. via 'zig build-lib @args.rsp'
- // See @file syntax here: https://gcc.gnu.org/onlinedocs/gcc/Overall-Options.html
- var args_length: usize = 0;
- for (argv.items) |arg| {
- args_length += arg.len + 1; // +1 to account for null terminator
- }
- if (args_length >= 30 * 1024) {
- const allocator = comp.gpa;
- const input_args = argv.items[2..];
- const output_dir = comp.local_cache_directory;
-
- var args_arena = std.heap.ArenaAllocator.init(allocator);
- defer args_arena.deinit();
-
- const args_to_escape = input_args;
- var escaped_args = try ArrayList([]const u8).initCapacity(args_arena.allocator(), args_to_escape.len);
-
- arg_blk: for (args_to_escape) |arg| {
- for (arg) |c, arg_idx| {
- if (c == '\\' or c == '"') {
- // Slow path for arguments that need to be escaped. We'll need to allocate and copy
- var escaped = try ArrayList(u8).initCapacity(args_arena.allocator(), arg.len + 1);
- const writer = escaped.writer();
- writer.writeAll(arg[0..arg_idx]) catch unreachable;
- for (arg[arg_idx..]) |to_escape| {
- if (to_escape == '\\' or to_escape == '"') try writer.writeByte('\\');
- try writer.writeByte(to_escape);
- }
- escaped_args.appendAssumeCapacity(escaped.items);
- continue :arg_blk;
- }
- }
- escaped_args.appendAssumeCapacity(arg); // no escaping needed so just use original argument
- }
-
- const partially_quoted = try std.mem.join(allocator, "\" \"", escaped_args.items);
- const args = try std.mem.concat(allocator, u8, &[_][]const u8{ "\"", partially_quoted, "\"" });
-
- // Write the args to zig-cache/args/<SHA256 hash of args> to avoid conflicts with
- // other zig build commands running in parallel.
-
- var args_hash: [Sha256.digest_length]u8 = undefined;
- Sha256.hash(args, &args_hash, .{});
- var args_hex_hash: [Sha256.digest_length * 2]u8 = undefined;
- _ = try std.fmt.bufPrint(
- &args_hex_hash,
- "{s}",
- .{std.fmt.fmtSliceHexLower(&args_hash)},
- );
-
- const args_dir = "args";
- try output_dir.handle.makePath(args_dir);
- const args_file = try fs.path.join(allocator, &[_][]const u8{
- args_dir, args_hex_hash[0..],
- });
- try output_dir.handle.writeFile(args_file, args);
- const args_file_path = try output_dir.handle.realpathAlloc(allocator, args_file);
-
- argv.shrinkRetainingCapacity(2);
- try argv.append(try std.mem.concat(allocator, u8, &[_][]const u8{ "@", args_file_path }));
- }
-
if (comp.verbose_cc) {
dump_argv(argv.items);
}
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 5e50780315..74155ca657 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -259,7 +259,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, ip.extra.items.len);
inline for (fields) |field| {
- ip.extra.appendAssumeCapacity(switch (field.field_type) {
+ ip.extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
Index => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
@@ -274,7 +274,7 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T {
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => ip.extra.items[i],
Index => @intToEnum(Index, ip.extra.items[i]),
i32 => @bitCast(i32, ip.extra.items[i]),
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 7b2f2fd40d..e775883b1f 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -212,6 +212,15 @@ pub fn categorizeOperand(
return .write;
},
+ .vector_store_elem => {
+ const o = air_datas[inst].vector_store_elem;
+ const extra = air.extraData(Air.Bin, o.payload).data;
+ if (o.vector_ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
+ if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
+ if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
+ return .write;
+ },
+
.arg,
.alloc,
.ret_ptr,
@@ -229,6 +238,7 @@ pub fn categorizeOperand(
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
+ .c_va_start,
=> return .none,
.fence => return .write,
@@ -270,6 +280,8 @@ pub fn categorizeOperand(
.splat,
.error_set_has_value,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> {
const o = air_datas[inst].ty_op;
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
@@ -313,6 +325,7 @@ pub fn categorizeOperand(
.trunc_float,
.neg,
.cmp_lt_errors_len,
+ .c_va_end,
=> {
const o = air_datas[inst].un_op;
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
@@ -709,7 +722,7 @@ const Analysis = struct {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, a.extra.items.len);
inline for (fields) |field| {
- a.extra.appendAssumeCapacity(switch (field.field_type) {
+ a.extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
else => @compileError("bad field type"),
});
@@ -824,6 +837,12 @@ fn analyzeInst(
return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none });
},
+ .vector_store_elem => {
+ const o = inst_datas[inst].vector_store_elem;
+ const extra = a.air.extraData(Air.Bin, o.payload).data;
+ return trackOperands(a, new_set, inst, main_tomb, .{ o.vector_ptr, extra.lhs, extra.rhs });
+ },
+
.arg,
.alloc,
.ret_ptr,
@@ -842,6 +861,7 @@ fn analyzeInst(
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
+ .c_va_start,
=> return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
.not,
@@ -883,6 +903,8 @@ fn analyzeInst(
.splat,
.error_set_has_value,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> {
const o = inst_datas[inst].ty_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
@@ -921,6 +943,7 @@ fn analyzeInst(
.neg_optimized,
.cmp_lt_errors_len,
.set_err_return_trace,
+ .c_va_end,
=> {
const operand = inst_datas[inst].un_op;
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
diff --git a/src/Module.zig b/src/Module.zig
index 3347280f59..9c7d3ee885 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3540,8 +3540,8 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
if (decl.getInnerNamespace()) |namespace| {
namespace.destroyDecls(mod);
}
- decl.clearValues(mod);
}
+ decl.clearValues(mod);
decl.dependants.deinit(gpa);
decl.dependencies.deinit(gpa);
decl.clearName(gpa);
@@ -4610,9 +4610,18 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// We need the memory for the Type to go into the arena for the Decl
var decl_arena = std.heap.ArenaAllocator.init(gpa);
- errdefer decl_arena.deinit();
const decl_arena_allocator = decl_arena.allocator();
+ const decl_arena_state = blk: {
+ errdefer decl_arena.deinit();
+ const s = try decl_arena_allocator.create(std.heap.ArenaAllocator.State);
+ break :blk s;
+ };
+ defer {
+ decl_arena_state.* = decl_arena.state;
+ decl.value_arena = decl_arena_state;
+ }
+
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
const analysis_arena_allocator = analysis_arena.allocator();
@@ -4681,8 +4690,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// not the struct itself.
try sema.resolveTypeLayout(decl_tv.ty);
- const decl_arena_state = try decl_arena_allocator.create(std.heap.ArenaAllocator.State);
-
if (decl.is_usingnamespace) {
if (!decl_tv.ty.eql(Type.type, mod)) {
return sema.fail(&block_scope, ty_src, "expected type, found {}", .{
@@ -4701,8 +4708,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl.@"linksection" = null;
decl.has_tv = true;
decl.owns_tv = false;
- decl_arena_state.* = decl_arena.state;
- decl.value_arena = decl_arena_state;
decl.analysis = .complete;
decl.generation = mod.generation;
@@ -4723,16 +4728,14 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
if (decl.getFunction()) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
}
- decl.clearValues(mod);
}
+ decl.clearValues(mod);
decl.ty = try decl_tv.ty.copy(decl_arena_allocator);
decl.val = try decl_tv.val.copy(decl_arena_allocator);
// linksection, align, and addrspace were already set by Sema
decl.has_tv = true;
decl.owns_tv = owns_tv;
- decl_arena_state.* = decl_arena.state;
- decl.value_arena = decl_arena_state;
decl.analysis = .complete;
decl.generation = mod.generation;
@@ -4767,8 +4770,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
var type_changed = true;
if (decl.has_tv) {
type_changed = !decl.ty.eql(decl_tv.ty, mod);
- decl.clearValues(mod);
}
+ decl.clearValues(mod);
decl.owns_tv = false;
var queue_linker_work = false;
@@ -4841,8 +4844,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
};
};
decl.has_tv = true;
- decl_arena_state.* = decl_arena.state;
- decl.value_arena = decl_arena_state;
decl.analysis = .complete;
decl.generation = mod.generation;
@@ -5447,8 +5448,8 @@ pub fn clearDecl(
if (decl.getInnerNamespace()) |namespace| {
try namespace.deleteAllDecls(mod, outdated_decls);
}
- decl.clearValues(mod);
}
+ decl.clearValues(mod);
if (decl.deletion_flag) {
decl.deletion_flag = false;
@@ -5672,11 +5673,15 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
runtime_param_index += 1;
continue;
}
+ const air_ty = try sema.addType(param_ty);
const arg_index = @intCast(u32, sema.air_instructions.len);
inner_block.instructions.appendAssumeCapacity(arg_index);
sema.air_instructions.appendAssumeCapacity(.{
.tag = .arg,
- .data = .{ .ty = param_ty },
+ .data = .{ .arg = .{
+ .ty = air_ty,
+ .src_index = @intCast(u32, total_param_index),
+ } },
});
sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index));
total_param_index += 1;
diff --git a/src/Sema.zig b/src/Sema.zig
index 8c7c8b0dd7..92024f7178 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -113,6 +113,7 @@ const target_util = @import("target.zig");
const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");
const build_options = @import("build_options");
+const Compilation = @import("Compilation.zig");
pub const default_branch_quota = 1000;
pub const default_reference_trace_len = 2;
@@ -334,6 +335,7 @@ pub const Block = struct {
/// It is shared among all the blocks in an inline or comptime called
/// function.
pub const Inlining = struct {
+ func: ?*Module.Fn,
comptime_result: Air.Inst.Ref,
merges: Merges,
};
@@ -474,13 +476,6 @@ pub const Block = struct {
});
}
- fn addArg(block: *Block, ty: Type) error{OutOfMemory}!Air.Inst.Ref {
- return block.addInst(.{
- .tag = .arg,
- .data = .{ .ty = ty },
- });
- }
-
fn addStructFieldPtr(
block: *Block,
struct_ptr: Air.Inst.Ref,
@@ -969,7 +964,6 @@ fn analyzeBodyInner(
.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
.optional_type => try sema.zirOptionalType(block, inst),
.ptr_type => try sema.zirPtrType(block, inst),
- .overflow_arithmetic_ptr => try sema.zirOverflowArithmeticPtr(block, inst),
.ref => try sema.zirRef(block, inst),
.ret_err_value_code => try sema.zirRetErrValueCode(inst),
.shr => try sema.zirShr(block, inst, .shr),
@@ -991,7 +985,6 @@ fn analyzeBodyInner(
.bit_size_of => try sema.zirBitSizeOf(block, inst),
.typeof => try sema.zirTypeof(block, inst),
.typeof_builtin => try sema.zirTypeofBuiltin(block, inst),
- .log2_int_type => try sema.zirLog2IntType(block, inst),
.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
.xor => try sema.zirBitwise(block, inst, .xor),
.struct_init_empty => try sema.zirStructInitEmpty(block, inst),
@@ -1148,6 +1141,10 @@ fn analyzeBodyInner(
.builtin_async_call => try sema.zirBuiltinAsyncCall( block, extended),
.cmpxchg => try sema.zirCmpxchg( block, extended),
.addrspace_cast => try sema.zirAddrSpaceCast( block, extended),
+ .c_va_arg => try sema.zirCVaArg( block, extended),
+ .c_va_copy => try sema.zirCVaCopy( block, extended),
+ .c_va_end => try sema.zirCVaEnd( block, extended),
+ .c_va_start => try sema.zirCVaStart( block, extended),
// zig fmt: on
.fence => {
@@ -2187,18 +2184,16 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
@setCold(true);
if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) {
- const err_path = err_msg.src_loc.file_scope.fullPath(sema.mod.gpa) catch unreachable;
- const err_source = err_msg.src_loc.file_scope.getSource(sema.mod.gpa) catch unreachable;
if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation;
- const err_span = err_msg.src_loc.span(sema.mod.gpa) catch unreachable;
- const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main);
- std.debug.print("compile error during Sema:\n{s}:{d}:{d}: error: {s}\n{s}\n\n", .{
- err_path,
- err_loc.line + 1,
- err_loc.column + 1,
- err_msg.msg,
- err_loc.source_line,
- });
+ var arena = std.heap.ArenaAllocator.init(sema.gpa);
+ errdefer arena.deinit();
+ var errors = std.ArrayList(Compilation.AllErrors.Message).init(sema.gpa);
+ defer errors.deinit();
+
+ Compilation.AllErrors.add(sema.mod, &arena, &errors, err_msg.*) catch unreachable;
+
+ std.debug.print("compile error during Sema:\n", .{});
+ Compilation.AllErrors.Message.renderToStdErr(errors.items[0], .no_color);
crash_report.compilerPanic("unexpected compile error occurred", null, null);
}
@@ -2541,6 +2536,9 @@ fn coerceResultPtr(
.wrap_errunion_payload => {
new_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, new_ptr, false, true);
},
+ .array_to_slice => {
+ return sema.fail(block, src, "TODO coerce_result_ptr array_to_slice", .{});
+ },
else => {
if (std.debug.runtime_safety) {
std.debug.panic("unexpected AIR tag for coerce_result_ptr: {}", .{
@@ -3254,17 +3252,16 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
const operand = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
- return sema.ensureResultUsed(block, operand, src);
+ return sema.ensureResultUsed(block, sema.typeOf(operand), src);
}
fn ensureResultUsed(
sema: *Sema,
block: *Block,
- operand: Air.Inst.Ref,
+ ty: Type,
src: LazySrcLoc,
) CompileError!void {
- const operand_ty = sema.typeOf(operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (ty.zigTypeTag()) {
.Void, .NoReturn => return,
.ErrorSet, .ErrorUnion => {
const msg = msg: {
@@ -3277,7 +3274,7 @@ fn ensureResultUsed(
},
else => {
const msg = msg: {
- const msg = try sema.errMsg(block, src, "value of type '{}' ignored", .{operand_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(block, src, "value of type '{}' ignored", .{ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "all non-void values must be used", .{});
try sema.errNote(block, src, msg, "this error can be suppressed by assigning the value to '_'", .{});
@@ -6426,7 +6423,11 @@ fn analyzeCall(
}),
else => unreachable,
};
- if (!is_comptime_call and module_fn.state == .sema_failure) return error.AnalysisFail;
+ if (func_ty_info.is_var_args) {
+ return sema.fail(block, call_src, "{s} call of variadic function", .{
+ @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
+ });
+ }
// Analyze the ZIR. The same ZIR gets analyzed into a runtime function
// or an inlined call depending on what union tag the `label` field is
@@ -6441,6 +6442,7 @@ fn analyzeCall(
// This one is shared among sub-blocks within the same callee, but not
// shared among the entire inline/comptime call stack.
var inlining: Block.Inlining = .{
+ .func = null,
.comptime_result = undefined,
.merges = .{
.results = .{},
@@ -6527,6 +6529,7 @@ fn analyzeCall(
const fn_info = sema.code.getFnInfo(module_fn.zir_body_inst);
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, fn_info.param_body);
+ var has_comptime_args = false;
var arg_i: usize = 0;
for (fn_info.param_body) |inst| {
sema.analyzeInlineCallArg(
@@ -6542,6 +6545,7 @@ fn analyzeCall(
memoized_call_key,
func_ty_info.param_types,
func,
+ &has_comptime_args,
) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = sema.inst_map.remove(inst);
@@ -6559,6 +6563,7 @@ fn analyzeCall(
memoized_call_key,
func_ty_info.param_types,
func,
+ &has_comptime_args,
);
unreachable;
},
@@ -6566,6 +6571,19 @@ fn analyzeCall(
};
}
+ if (!has_comptime_args and module_fn.state == .sema_failure) return error.AnalysisFail;
+
+ const recursive_msg = "inline call is recursive";
+ var head = if (!has_comptime_args) block else null;
+ while (head) |some| {
+ const parent_inlining = some.inlining orelse break;
+ if (parent_inlining.func == module_fn) {
+ return sema.fail(block, call_src, recursive_msg, .{});
+ }
+ head = some.parent;
+ }
+ if (!has_comptime_args) inlining.func = module_fn;
+
// In case it is a generic function with an expression for the return type that depends
// on parameters, we must now do the same for the return type as we just did with
// each of the parameters, resolving the return type and providing it to the child
@@ -6641,11 +6659,16 @@ fn analyzeCall(
};
}
+ if (is_comptime_call and ensure_result_used) {
+ try sema.ensureResultUsed(block, fn_ret_ty, call_src);
+ }
+
const result = result: {
sema.analyzeBody(&child_block, fn_info.body) catch |err| switch (err) {
error.ComptimeReturn => break :result inlining.comptime_result,
error.AnalysisFail => {
const err_msg = sema.err orelse return err;
+ if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err;
try sema.errNote(block, call_src, err_msg, "called from here", .{});
err_msg.clearTrace(sema.gpa);
return err;
@@ -6763,7 +6786,7 @@ fn analyzeCall(
};
if (ensure_result_used) {
- try sema.ensureResultUsed(block, result, call_src);
+ try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
}
if (call_tag == .call_always_tail) {
return sema.handleTailCall(block, call_src, func_ty, result);
@@ -6803,9 +6826,14 @@ fn analyzeInlineCallArg(
memoized_call_key: Module.MemoizedCall.Key,
raw_param_types: []const Type,
func_inst: Air.Inst.Ref,
+ has_comptime_args: *bool,
) !void {
const zir_tags = sema.code.instructions.items(.tag);
switch (zir_tags[inst]) {
+ .param_comptime, .param_anytype_comptime => has_comptime_args.* = true,
+ else => {},
+ }
+ switch (zir_tags[inst]) {
.param, .param_comptime => {
// Evaluate the parameter type expression now that previous ones have
// been mapped, and coerce the corresponding argument to it.
@@ -6859,23 +6887,20 @@ fn analyzeInlineCallArg(
.ty = param_ty,
.val = arg_val,
};
- } else if (zir_tags[inst] == .param_comptime or try sema.typeRequiresComptime(param_ty)) {
- sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
- } else if (try sema.resolveMaybeUndefVal(casted_arg)) |val| {
- // We have a comptime value but we need a runtime value to preserve inlining semantics,
- const wrapped = try sema.addConstant(param_ty, try Value.Tag.runtime_value.create(sema.arena, val));
- sema.inst_map.putAssumeCapacityNoClobber(inst, wrapped);
} else {
sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
}
+ if (try sema.resolveMaybeUndefVal(casted_arg)) |_| {
+ has_comptime_args.* = true;
+ }
+
arg_i.* += 1;
},
.param_anytype, .param_anytype_comptime => {
// No coercion needed.
const uncasted_arg = uncasted_args[arg_i.*];
new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg);
- const param_ty = sema.typeOf(uncasted_arg);
if (is_comptime_call) {
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
@@ -6901,16 +6926,14 @@ fn analyzeInlineCallArg(
.ty = sema.typeOf(uncasted_arg),
.val = arg_val,
};
- } else if (zir_tags[inst] == .param_anytype_comptime or try sema.typeRequiresComptime(param_ty)) {
- sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
- } else if (try sema.resolveMaybeUndefVal(uncasted_arg)) |val| {
- // We have a comptime value but we need a runtime value to preserve inlining semantics,
- const wrapped = try sema.addConstant(param_ty, try Value.Tag.runtime_value.create(sema.arena, val));
- sema.inst_map.putAssumeCapacityNoClobber(inst, wrapped);
} else {
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
}
+ if (try sema.resolveMaybeUndefVal(uncasted_arg)) |_| {
+ has_comptime_args.* = true;
+ }
+
arg_i.* += 1;
},
else => {},
@@ -7089,7 +7112,6 @@ fn instantiateGenericCall(
const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter);
const callee = if (!gop.found_existing) callee: {
const new_module_func = try gpa.create(Module.Fn);
- errdefer gpa.destroy(new_module_func);
// This ensures that we can operate on the hash map before the Module.Fn
// struct is fully initialized.
@@ -7097,7 +7119,6 @@ fn instantiateGenericCall(
new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional();
new_module_func.comptime_args = null;
gop.key_ptr.* = new_module_func;
- errdefer assert(mod.monomorphed_funcs.remove(new_module_func));
try namespace.anon_decls.ensureUnusedCapacity(gpa, 1);
@@ -7105,7 +7126,6 @@ fn instantiateGenericCall(
const src_decl_index = namespace.getDeclIndex();
const src_decl = mod.declPtr(src_decl_index);
const new_decl_index = try mod.allocateNewDecl(namespace, fn_owner_decl.src_node, src_decl.src_scope);
- errdefer mod.destroyDecl(new_decl_index);
const new_decl = mod.declPtr(new_decl_index);
// TODO better names for generic function instantiations
const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{
@@ -7125,216 +7145,59 @@ fn instantiateGenericCall(
new_decl.generation = mod.generation;
namespace.anon_decls.putAssumeCapacityNoClobber(new_decl_index, {});
- errdefer assert(namespace.anon_decls.orderedRemove(new_decl_index));
// The generic function Decl is guaranteed to be the first dependency
// of each of its instantiations.
assert(new_decl.dependencies.keys().len == 0);
try mod.declareDeclDependency(new_decl_index, module_fn.owner_decl);
- // Resolving the new function type below will possibly declare more decl dependencies
- // and so we remove them all here in case of error.
- errdefer {
- for (new_decl.dependencies.keys()) |dep_index| {
- const dep = mod.declPtr(dep_index);
- dep.removeDependant(new_decl_index);
- }
- }
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
- errdefer new_decl_arena.deinit();
const new_decl_arena_allocator = new_decl_arena.allocator();
- // Re-run the block that creates the function, with the comptime parameters
- // pre-populated inside `inst_map`. This causes `param_comptime` and
- // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
- // new, monomorphized function, with the comptime parameters elided.
- var child_sema: Sema = .{
- .mod = mod,
- .gpa = gpa,
- .arena = sema.arena,
- .perm_arena = new_decl_arena_allocator,
- .code = fn_zir,
- .owner_decl = new_decl,
- .owner_decl_index = new_decl_index,
- .func = null,
- .fn_ret_ty = Type.void,
- .owner_func = null,
- .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
- .comptime_args_fn_inst = module_fn.zir_body_inst,
- .preallocated_new_func = new_module_func,
- .is_generic_instantiation = true,
- .branch_quota = sema.branch_quota,
- .branch_count = sema.branch_count,
- };
- defer child_sema.deinit();
-
- var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
- defer wip_captures.deinit();
-
- var child_block: Block = .{
- .parent = null,
- .sema = &child_sema,
- .src_decl = new_decl_index,
- .namespace = namespace,
- .wip_capture_scope = wip_captures.scope,
- .instructions = .{},
- .inlining = null,
- .is_comptime = true,
- };
- defer {
- child_block.instructions.deinit(gpa);
- child_block.params.deinit(gpa);
- }
-
- try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
-
- var arg_i: usize = 0;
- for (fn_info.param_body) |inst| {
- var is_comptime = false;
- var is_anytype = false;
- switch (zir_tags[inst]) {
- .param => {
- is_comptime = func_ty_info.paramIsComptime(arg_i);
- },
- .param_comptime => {
- is_comptime = true;
- },
- .param_anytype => {
- is_anytype = true;
- is_comptime = func_ty_info.paramIsComptime(arg_i);
- },
- .param_anytype_comptime => {
- is_anytype = true;
- is_comptime = true;
- },
- else => continue,
- }
- const arg = uncasted_args[arg_i];
- if (is_comptime) {
- const arg_val = (try sema.resolveMaybeUndefVal(arg)).?;
- const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
- } else if (is_anytype) {
- const arg_ty = sema.typeOf(arg);
- if (try sema.typeRequiresComptime(arg_ty)) {
- const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) {
- error.NeededSourceLocation => {
- const decl = sema.mod.declPtr(block.src_decl);
- const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src);
- _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known");
- unreachable;
- },
- else => |e| return e,
- };
- const child_arg = try child_sema.addConstant(arg_ty, arg_val);
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
+ const new_func = sema.resolveGenericInstantiationType(
+ block,
+ new_decl_arena_allocator,
+ fn_zir,
+ new_decl,
+ new_decl_index,
+ uncasted_args,
+ module_fn,
+ new_module_func,
+ namespace,
+ func_ty_info,
+ call_src,
+ bound_arg_src,
+ ) catch |err| switch (err) {
+ error.GenericPoison, error.ComptimeReturn => {
+ new_decl_arena.deinit();
+ // Resolving the new function type below will possibly declare more decl dependencies
+ // and so we remove them all here in case of error.
+ for (new_decl.dependencies.keys()) |dep_index| {
+ const dep = mod.declPtr(dep_index);
+ dep.removeDependant(new_decl_index);
+ }
+ assert(namespace.anon_decls.orderedRemove(new_decl_index));
+ mod.destroyDecl(new_decl_index);
+ assert(mod.monomorphed_funcs.remove(new_module_func));
+ gpa.destroy(new_module_func);
+ return err;
+ },
+ else => {
+ {
+ errdefer new_decl_arena.deinit();
+ try new_decl.finalizeNewArena(&new_decl_arena);
+ }
+ // TODO look up the compile error that happened here and attach a note to it
+ // pointing here, at the generic instantiation callsite.
+ if (sema.owner_func) |owner_func| {
+ owner_func.state = .dependency_failure;
} else {
- // We insert into the map an instruction which is runtime-known
- // but has the type of the argument.
- const child_arg = try child_block.addArg(arg_ty);
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
+ sema.owner_decl.analysis = .dependency_failure;
}
- }
- arg_i += 1;
- }
-
- // Save the error trace as our first action in the function.
- // If this is unnecessary after all, Liveness will clean it up for us.
- const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
- child_sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
- child_block.error_return_trace_index = error_return_trace_index;
-
- const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst) catch |err| {
- // TODO look up the compile error that happened here and attach a note to it
- // pointing here, at the generic instantiation callsite.
- if (sema.owner_func) |owner_func| {
- owner_func.state = .dependency_failure;
- } else {
- sema.owner_decl.analysis = .dependency_failure;
- }
- return err;
+ return err;
+ },
};
- const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, "") catch unreachable;
- const new_func = new_func_val.castTag(.function).?.data;
- errdefer new_func.deinit(gpa);
- assert(new_func == new_module_func);
-
- arg_i = 0;
- for (fn_info.param_body) |inst| {
- var is_comptime = false;
- switch (zir_tags[inst]) {
- .param => {
- is_comptime = func_ty_info.paramIsComptime(arg_i);
- },
- .param_comptime => {
- is_comptime = true;
- },
- .param_anytype => {
- is_comptime = func_ty_info.paramIsComptime(arg_i);
- },
- .param_anytype_comptime => {
- is_comptime = true;
- },
- else => continue,
- }
-
- // We populate the Type here regardless because it is needed by
- // `GenericCallAdapter.eql` as well as function body analysis.
- // Whether it is anytype is communicated by `isAnytypeParam`.
- const arg = child_sema.inst_map.get(inst).?;
- const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator);
-
- if (try sema.typeRequiresComptime(copied_arg_ty)) {
- is_comptime = true;
- }
-
- if (is_comptime) {
- const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?;
- child_sema.comptime_args[arg_i] = .{
- .ty = copied_arg_ty,
- .val = try arg_val.copy(new_decl_arena_allocator),
- };
- } else {
- child_sema.comptime_args[arg_i] = .{
- .ty = copied_arg_ty,
- .val = Value.initTag(.generic_poison),
- };
- }
-
- arg_i += 1;
- }
-
- try wip_captures.finalize();
-
- // Populate the Decl ty/val with the function and its type.
- new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator);
- // If the call evaluated to a return type that requires comptime, never mind
- // our generic instantiation. Instead we need to perform a comptime call.
- const new_fn_info = new_decl.ty.fnInfo();
- if (try sema.typeRequiresComptime(new_fn_info.return_type)) {
- return error.ComptimeReturn;
- }
- // Similarly, if the call evaluated to a generic type we need to instead
- // call it inline.
- if (new_fn_info.is_generic or new_fn_info.cc == .Inline) {
- return error.GenericPoison;
- }
-
- new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func);
- new_decl.@"align" = 0;
- new_decl.has_tv = true;
- new_decl.owns_tv = true;
- new_decl.analysis = .complete;
-
- log.debug("generic function '{s}' instantiated with type {}", .{
- new_decl.name, new_decl.ty.fmtDebug(),
- });
-
- // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
- // will be populated, ensuring it will have `analyzeBody` called with the ZIR
- // parameters mapped appropriately.
- try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
- try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
+ errdefer new_decl_arena.deinit();
try new_decl.finalizeNewArena(&new_decl_arena);
break :callee new_func;
@@ -7406,7 +7269,7 @@ fn instantiateGenericCall(
sema.appendRefsAssumeCapacity(runtime_args);
if (ensure_result_used) {
- try sema.ensureResultUsed(block, result, call_src);
+ try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
}
if (call_tag == .call_always_tail) {
return sema.handleTailCall(block, call_src, func_ty, result);
@@ -7414,6 +7277,218 @@ fn instantiateGenericCall(
return result;
}
+fn resolveGenericInstantiationType(
+ sema: *Sema,
+ block: *Block,
+ new_decl_arena_allocator: Allocator,
+ fn_zir: Zir,
+ new_decl: *Decl,
+ new_decl_index: Decl.Index,
+ uncasted_args: []const Air.Inst.Ref,
+ module_fn: *Module.Fn,
+ new_module_func: *Module.Fn,
+ namespace: *Namespace,
+ func_ty_info: Type.Payload.Function.Data,
+ call_src: LazySrcLoc,
+ bound_arg_src: ?LazySrcLoc,
+) !*Module.Fn {
+ const mod = sema.mod;
+ const gpa = sema.gpa;
+
+ const zir_tags = fn_zir.instructions.items(.tag);
+ const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
+
+ // Re-run the block that creates the function, with the comptime parameters
+ // pre-populated inside `inst_map`. This causes `param_comptime` and
+ // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
+ // new, monomorphized function, with the comptime parameters elided.
+ var child_sema: Sema = .{
+ .mod = mod,
+ .gpa = gpa,
+ .arena = sema.arena,
+ .perm_arena = new_decl_arena_allocator,
+ .code = fn_zir,
+ .owner_decl = new_decl,
+ .owner_decl_index = new_decl_index,
+ .func = null,
+ .fn_ret_ty = Type.void,
+ .owner_func = null,
+ .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
+ .comptime_args_fn_inst = module_fn.zir_body_inst,
+ .preallocated_new_func = new_module_func,
+ .is_generic_instantiation = true,
+ .branch_quota = sema.branch_quota,
+ .branch_count = sema.branch_count,
+ };
+ defer child_sema.deinit();
+
+ var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
+ defer wip_captures.deinit();
+
+ var child_block: Block = .{
+ .parent = null,
+ .sema = &child_sema,
+ .src_decl = new_decl_index,
+ .namespace = namespace,
+ .wip_capture_scope = wip_captures.scope,
+ .instructions = .{},
+ .inlining = null,
+ .is_comptime = true,
+ };
+ defer {
+ child_block.instructions.deinit(gpa);
+ child_block.params.deinit(gpa);
+ }
+
+ try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
+
+ var arg_i: usize = 0;
+ for (fn_info.param_body) |inst| {
+ var is_comptime = false;
+ var is_anytype = false;
+ switch (zir_tags[inst]) {
+ .param => {
+ is_comptime = func_ty_info.paramIsComptime(arg_i);
+ },
+ .param_comptime => {
+ is_comptime = true;
+ },
+ .param_anytype => {
+ is_anytype = true;
+ is_comptime = func_ty_info.paramIsComptime(arg_i);
+ },
+ .param_anytype_comptime => {
+ is_anytype = true;
+ is_comptime = true;
+ },
+ else => continue,
+ }
+ const arg = uncasted_args[arg_i];
+ if (is_comptime) {
+ const arg_val = (try sema.resolveMaybeUndefVal(arg)).?;
+ const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
+ child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
+ } else if (is_anytype) {
+ const arg_ty = sema.typeOf(arg);
+ if (try sema.typeRequiresComptime(arg_ty)) {
+ const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) {
+ error.NeededSourceLocation => {
+ const decl = sema.mod.declPtr(block.src_decl);
+ const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src);
+ _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known");
+ unreachable;
+ },
+ else => |e| return e,
+ };
+ const child_arg = try child_sema.addConstant(arg_ty, arg_val);
+ child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
+ } else {
+ // We insert into the map an instruction which is runtime-known
+ // but has the type of the argument.
+ const child_arg = try child_block.addInst(.{
+ .tag = .arg,
+ .data = .{ .arg = .{
+ .ty = try child_sema.addType(arg_ty),
+ .src_index = @intCast(u32, arg_i),
+ } },
+ });
+ child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
+ }
+ }
+ arg_i += 1;
+ }
+
+ // Save the error trace as our first action in the function.
+ // If this is unnecessary after all, Liveness will clean it up for us.
+ const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
+ child_sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
+ child_block.error_return_trace_index = error_return_trace_index;
+
+ const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
+ const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable;
+ const new_func = new_func_val.castTag(.function).?.data;
+ errdefer new_func.deinit(gpa);
+ assert(new_func == new_module_func);
+
+ arg_i = 0;
+ for (fn_info.param_body) |inst| {
+ var is_comptime = false;
+ switch (zir_tags[inst]) {
+ .param => {
+ is_comptime = func_ty_info.paramIsComptime(arg_i);
+ },
+ .param_comptime => {
+ is_comptime = true;
+ },
+ .param_anytype => {
+ is_comptime = func_ty_info.paramIsComptime(arg_i);
+ },
+ .param_anytype_comptime => {
+ is_comptime = true;
+ },
+ else => continue,
+ }
+
+ // We populate the Type here regardless because it is needed by
+ // `GenericCallAdapter.eql` as well as function body analysis.
+ // Whether it is anytype is communicated by `isAnytypeParam`.
+ const arg = child_sema.inst_map.get(inst).?;
+ const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator);
+
+ if (try sema.typeRequiresComptime(copied_arg_ty)) {
+ is_comptime = true;
+ }
+
+ if (is_comptime) {
+ const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?;
+ child_sema.comptime_args[arg_i] = .{
+ .ty = copied_arg_ty,
+ .val = try arg_val.copy(new_decl_arena_allocator),
+ };
+ } else {
+ child_sema.comptime_args[arg_i] = .{
+ .ty = copied_arg_ty,
+ .val = Value.initTag(.generic_poison),
+ };
+ }
+
+ arg_i += 1;
+ }
+
+ try wip_captures.finalize();
+
+ // Populate the Decl ty/val with the function and its type.
+ new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator);
+ // If the call evaluated to a return type that requires comptime, never mind
+ // our generic instantiation. Instead we need to perform a comptime call.
+ const new_fn_info = new_decl.ty.fnInfo();
+ if (try sema.typeRequiresComptime(new_fn_info.return_type)) {
+ return error.ComptimeReturn;
+ }
+ // Similarly, if the call evaluated to a generic type we need to instead
+ // call it inline.
+ if (new_fn_info.is_generic or new_fn_info.cc == .Inline) {
+ return error.GenericPoison;
+ }
+
+ new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func);
+ new_decl.@"align" = 0;
+ new_decl.has_tv = true;
+ new_decl.owns_tv = true;
+ new_decl.analysis = .complete;
+
+ log.debug("generic function '{s}' instantiated with type {}", .{
+ new_decl.name, new_decl.ty.fmtDebug(),
+ });
+
+ // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
+ // will be populated, ensuring it will have `analyzeBody` called with the ZIR
+ // parameters mapped appropriately.
+ try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
+ try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
+ return new_func;
+}
+
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
if (!ty.isSimpleTuple()) return;
const tuple = ty.tupleFields();
@@ -7812,7 +7887,23 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
if (dest_ty.isNonexhaustiveEnum()) {
- return sema.addConstant(dest_ty, int_val);
+ var buffer: Type.Payload.Bits = undefined;
+ const int_tag_ty = dest_ty.intTagType(&buffer);
+ if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
+ return sema.addConstant(dest_ty, int_val);
+ }
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ src,
+ "int value '{}' out of range of non-exhaustive enum '{}'",
+ .{ int_val.fmtValue(sema.typeOf(operand), sema.mod), dest_ty.fmt(sema.mod) },
+ );
+ errdefer msg.destroy(sema.gpa);
+ try sema.addDeclaredHereNote(msg, dest_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
if (int_val.isUndef()) {
return sema.failWithUseOfUndef(block, operand_src);
@@ -8404,6 +8495,7 @@ fn funcCommon(
) CompileError!Air.Inst.Ref {
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
+ const func_src = LazySrcLoc.nodeOffset(src_node_offset);
var is_generic = bare_return_type.tag() == .generic_poison or
alignment == null or
@@ -8411,6 +8503,15 @@ fn funcCommon(
section == .generic or
cc == null;
+ if (var_args) {
+ if (is_generic) {
+ return sema.fail(block, func_src, "generic function cannot be variadic", .{});
+ }
+ if (cc.? != .C) {
+ return sema.fail(block, cc_src, "variadic function must have 'C' calling convention", .{});
+ }
+ }
+
var destroy_fn_on_error = false;
const new_func: *Module.Fn = new_func: {
if (!has_body) break :new_func undefined;
@@ -8798,7 +8899,9 @@ fn analyzeParameter(
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!this_generic and is_noalias and !param.ty.isPtrAtRuntime()) {
+ if (!sema.is_generic_instantiation and !this_generic and is_noalias and
+ !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional()))
+ {
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
}
}
@@ -8841,6 +8944,11 @@ fn zirParam(
};
switch (err) {
error.GenericPoison => {
+ if (sema.inst_map.get(inst)) |_| {
+ // A generic function is about to evaluate to another generic function.
+ // Return an error instead.
+ return error.GenericPoison;
+ }
// The type is not available until the generic instantiation.
// We result the param instruction with a poison value and
// insert an anytype parameter.
@@ -8857,6 +8965,11 @@ fn zirParam(
};
const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
error.GenericPoison => {
+ if (sema.inst_map.get(inst)) |_| {
+ // A generic function is about to evaluate to another generic function.
+ // Return an error instead.
+ return error.GenericPoison;
+ }
// The type is not available until the generic instantiation.
// We result the param instruction with a poison value and
// insert an anytype parameter.
@@ -9201,7 +9314,7 @@ fn intCast(
// If the destination type is signed, then we need to double its
// range to account for negative values.
const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
- const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, target);
+ const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod);
break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty);
} else dest_max_val;
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
@@ -11438,6 +11551,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
.dbg_block_begin,
.dbg_block_end,
.dbg_stmt,
+ .save_err_ret_index,
=> {},
.@"unreachable" => break inst,
else => return,
@@ -11659,9 +11773,11 @@ fn zirShl(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.gte, bit_value, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.gte, bit_value, target)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
scalar_ty.fmt(sema.mod),
});
@@ -11677,9 +11793,11 @@ fn zirShl(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.lt, Value.zero, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
@@ -11702,25 +11820,25 @@ fn zirShl(
const val = switch (air_tag) {
.shl_exact => val: {
- const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, target);
+ const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod);
if (scalar_ty.zigTypeTag() == .ComptimeInt) {
break :val shifted.wrapped_result;
}
- if (shifted.overflowed.compareAllWithZero(.eq)) {
+ if (shifted.overflow_bit.compareAllWithZero(.eq)) {
break :val shifted.wrapped_result;
}
return sema.fail(block, src, "operation caused overflow", .{});
},
.shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
- try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target)
+ try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
- try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, target),
+ try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod),
.shl => if (scalar_ty.zigTypeTag() == .ComptimeInt)
- try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target)
+ try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
- try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, target),
+ try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod),
else => unreachable,
};
@@ -11843,9 +11961,11 @@ fn zirShr(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.gte, bit_value, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.gte, bit_value, target)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
scalar_ty.fmt(sema.mod),
});
@@ -11861,9 +11981,11 @@ fn zirShr(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.lt, Value.zero, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
@@ -11879,12 +12001,12 @@ fn zirShr(
}
if (air_tag == .shr_exact) {
// Detect if any ones would be shifted out.
- const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, target);
+ const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, sema.mod);
if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
}
}
- const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, target);
+ const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, sema.mod);
return sema.addConstant(lhs_ty, val);
} else {
break :rs lhs_src;
@@ -11968,7 +12090,6 @@ fn zirBitwise(
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
- const target = sema.mod.getTarget();
if (!is_int) {
return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) });
@@ -11980,9 +12101,9 @@ fn zirBitwise(
if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| {
const result_val = switch (air_tag) {
- .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, target),
- .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, target),
- .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, target),
+ .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, sema.mod),
+ .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, sema.mod),
+ .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, sema.mod),
else => unreachable,
};
return sema.addConstant(resolved_type, result_val);
@@ -12009,7 +12130,6 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operand = try sema.resolveInst(inst_data.operand);
const operand_type = sema.typeOf(operand);
const scalar_type = operand_type.scalarType();
- const target = sema.mod.getTarget();
if (scalar_type.zigTypeTag() != .Int) {
return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
@@ -12026,14 +12146,14 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const elems = try sema.arena.alloc(Value, vec_len);
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
- elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target);
+ elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
}
return sema.addConstant(
operand_type,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else {
- const result_val = try val.bitwiseNot(operand_type, sema.arena, target);
+ const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod);
return sema.addConstant(operand_type, result_val);
}
}
@@ -12138,17 +12258,21 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_ty = sema.typeOf(rhs);
const src = inst_data.src();
- if (lhs_ty.isTuple() and rhs_ty.isTuple()) {
+ const lhs_is_tuple = lhs_ty.isTuple();
+ const rhs_is_tuple = rhs_ty.isTuple();
+ if (lhs_is_tuple and rhs_is_tuple) {
return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
}
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
- const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs) orelse {
+ const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
+ if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
};
- const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs) orelse {
+ const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
+ assert(!rhs_is_tuple);
return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(sema.mod)});
};
@@ -12218,8 +12342,16 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :p null;
};
- const runtime_src = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| rs: {
- if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| {
+ const runtime_src = if (switch (lhs_ty.zigTypeTag()) {
+ .Array, .Struct => try sema.resolveMaybeUndefVal(lhs),
+ .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
+ else => unreachable,
+ }) |lhs_val| rs: {
+ if (switch (rhs_ty.zigTypeTag()) {
+ .Array, .Struct => try sema.resolveMaybeUndefVal(rhs),
+ .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
+ else => unreachable,
+ }) |rhs_val| {
const lhs_sub_val = if (lhs_ty.isSinglePointer())
(try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
else
@@ -12234,18 +12366,24 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const element_vals = try sema.arena.alloc(Value, final_len_including_sent);
var elem_i: usize = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
- const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, elem_i);
- const elem_val_inst = try sema.addConstant(lhs_info.elem_type, elem_val);
+ const lhs_elem_i = elem_i;
+ const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type;
+ const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.initTag(.unreachable_value);
+ const elem_val = if (elem_default_val.tag() == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val;
+ const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
- const coereced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
- element_vals[elem_i] = coereced_elem_val;
+ const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
+ element_vals[elem_i] = coerced_elem_val;
}
while (elem_i < result_len) : (elem_i += 1) {
- const elem_val = try rhs_sub_val.elemValue(sema.mod, sema.arena, elem_i - lhs_len);
- const elem_val_inst = try sema.addConstant(lhs_info.elem_type, elem_val);
+ const rhs_elem_i = elem_i - lhs_len;
+ const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type;
+ const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.initTag(.unreachable_value);
+ const elem_val = if (elem_default_val.tag() == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val;
+ const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
- const coereced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
- element_vals[elem_i] = coereced_elem_val;
+ const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
+ element_vals[elem_i] = coerced_elem_val;
}
if (res_sent_val) |sent_val| {
element_vals[result_len] = sent_val;
@@ -12310,7 +12448,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return block.addAggregateInit(result_ty, element_refs);
}
-fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) !?Type.ArrayInfo {
+fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag()) {
.Array => return operand_ty.arrayInfo(),
@@ -12336,6 +12474,16 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
.C => {},
}
},
+ .Struct => {
+ if (operand_ty.isTuple() and peer_ty.isIndexable()) {
+ assert(!peer_ty.isTuple());
+ return .{
+ .elem_type = peer_ty.elemType2(),
+ .sentinel = null,
+ .len = operand_ty.arrayLen(),
+ };
+ }
+ },
else => {},
}
return null;
@@ -12430,7 +12578,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
// Analyze the lhs first, to catch the case that someone tried to do exponentiation
- const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs) orelse {
+ const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -12562,8 +12710,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// We handle float negation here to ensure negative zero is represented in the bits.
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty);
- const target = sema.mod.getTarget();
- return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, target));
+ return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod));
}
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
@@ -12655,7 +12802,6 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -12666,7 +12812,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// If lhs % rhs is 0, it doesn't matter.
const lhs_val = maybe_lhs_val orelse unreachable;
const rhs_val = maybe_rhs_val orelse unreachable;
- const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target) catch unreachable;
+ const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
if (!rem.compareAllWithZero(.eq)) {
return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
@tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
@@ -12742,7 +12888,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
@@ -12751,7 +12897,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod),
);
}
} else {
@@ -12815,7 +12961,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -12860,24 +13005,24 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target);
+ const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
- const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
}
return sema.addConstant(resolved_type, res);
} else {
- const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target);
+ const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
return sema.addConstant(
resolved_type,
- try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
@@ -12980,7 +13125,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13040,12 +13184,12 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (is_int) {
return sema.addConstant(
resolved_type,
- try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod),
);
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
@@ -13097,7 +13241,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13154,7 +13297,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
@@ -13163,7 +13306,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
@@ -13341,7 +13484,6 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13418,7 +13560,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
return sema.addConstant(
resolved_type,
- try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod),
);
} else {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
@@ -13447,7 +13589,11 @@ fn intRem(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -13517,7 +13663,6 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13549,7 +13694,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod),
);
}
break :rs lhs_src;
@@ -13573,7 +13718,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod),
);
} else break :rs rhs_src;
} else break :rs lhs_src;
@@ -13620,7 +13765,6 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13676,7 +13820,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod),
);
} else break :rs rhs_src;
} else break :rs lhs_src;
@@ -13701,25 +13845,37 @@ fn zirOverflowArithmetic(
const tracy = trace(@src());
defer tracy.end();
- const extra = sema.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data;
+ const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
- const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
- const lhs = try sema.resolveInst(extra.lhs);
- const rhs = try sema.resolveInst(extra.rhs);
- const ptr = try sema.resolveInst(extra.ptr);
+ const uncasted_lhs = try sema.resolveInst(extra.lhs);
+ const uncasted_rhs = try sema.resolveInst(extra.rhs);
- const lhs_ty = sema.typeOf(lhs);
- const rhs_ty = sema.typeOf(rhs);
+ const lhs_ty = sema.typeOf(uncasted_lhs);
+ const rhs_ty = sema.typeOf(uncasted_rhs);
const mod = sema.mod;
- const target = mod.getTarget();
- // Note, the types of lhs/rhs (also for shifting)/ptr are already correct as ensured by astgen.
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- const dest_ty = lhs_ty;
+
+ const instructions = &[_]Air.Inst.Ref{ uncasted_lhs, uncasted_rhs };
+ const dest_ty = if (zir_tag == .shl_with_overflow)
+ lhs_ty
+ else
+ try sema.resolvePeerTypes(block, src, instructions, .{
+ .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
+ });
+
+ const rhs_dest_ty = if (zir_tag == .shl_with_overflow)
+ try sema.log2IntType(block, lhs_ty, src)
+ else
+ dest_ty;
+
+ const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
+ const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
+
if (dest_ty.scalarType().zigTypeTag() != .Int) {
return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
}
@@ -13728,14 +13884,11 @@ fn zirOverflowArithmetic(
const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
- // TODO: Remove and use `ov_ty` instead.
- // This is a temporary type used until overflow arithmetic properly returns `u1` instead of `bool`.
- const overflowed_ty = if (dest_ty.zigTypeTag() == .Vector) try Type.vector(sema.arena, dest_ty.vectorLen(), Type.bool) else Type.bool;
-
- const result: struct {
- /// TODO: Rename to `overflow_bit` and make of type `u1`.
- overflowed: Air.Inst.Ref,
- wrapped: Air.Inst.Ref,
+
+ var result: struct {
+ inst: Air.Inst.Ref = .none,
+ wrapped: Value = Value.initTag(.unreachable_value),
+ overflow_bit: Value,
} = result: {
switch (zir_tag) {
.add_with_overflow => {
@@ -13744,24 +13897,22 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the argument is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = rhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
- break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
+ break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
const result = try sema.intAddWithOverflow(lhs_val, rhs_val, dest_ty);
- const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
- const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
- break :result .{ .overflowed = overflowed, .wrapped = wrapped };
+ break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
@@ -13770,18 +13921,16 @@ fn zirOverflowArithmetic(
// Otherwise, if either result is undefined, both results are undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
- break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
+ break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
+ break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
const result = try sema.intSubWithOverflow(lhs_val, rhs_val, dest_ty);
- const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
- const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
- break :result .{ .overflowed = overflowed, .wrapped = wrapped };
+ break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
@@ -13792,9 +13941,9 @@ fn zirOverflowArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
- } else if (try sema.compareAll(lhs_val, .eq, Value.one, dest_ty)) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = rhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+ } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) {
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
}
}
}
@@ -13802,9 +13951,9 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef()) {
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = rhs };
- } else if (try sema.compareAll(rhs_val, .eq, Value.one, dest_ty)) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
+ } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) {
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
}
}
}
@@ -13812,13 +13961,11 @@ fn zirOverflowArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
- break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
+ break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
- const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, target);
- const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
- const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
- break :result .{ .overflowed = overflowed, .wrapped = wrapped };
+ const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod);
+ break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
@@ -13828,24 +13975,22 @@ fn zirOverflowArithmetic(
// Oterhwise if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
- break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
+ break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
- const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, target);
- const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
- const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
- break :result .{ .overflowed = overflowed, .wrapped = wrapped };
+ const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, sema.mod);
+ break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
@@ -13863,7 +14008,7 @@ fn zirOverflowArithmetic(
const runtime_src = if (maybe_lhs_val == null) lhs_src else rhs_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
- const tuple = try block.addInst(.{
+ return block.addInst(.{
.tag = air_tag,
.data = .{ .ty_pl = .{
.ty = try block.sema.addType(tuple_ty),
@@ -13873,16 +14018,32 @@ fn zirOverflowArithmetic(
}),
} },
});
+ };
- const wrapped = try sema.tupleFieldValByIndex(block, src, tuple, 0, tuple_ty);
- try sema.storePtr2(block, src, ptr, ptr_src, wrapped, src, .store);
+ if (result.inst != .none) {
+ if (try sema.resolveMaybeUndefVal(result.inst)) |some| {
+ result.wrapped = some;
+ result.inst = .none;
+ }
+ }
- const overflow_bit = try sema.tupleFieldValByIndex(block, src, tuple, 1, tuple_ty);
- return block.addBitCast(overflowed_ty, overflow_bit);
- };
+ if (result.inst == .none) {
+ const values = try sema.arena.alloc(Value, 2);
+ values[0] = result.wrapped;
+ values[1] = result.overflow_bit;
+ const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
+ return sema.addConstant(tuple_ty, tuple_val);
+ }
+
+ const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2);
+ element_refs[0] = result.inst;
+ element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1), result.overflow_bit);
+ return block.addAggregateInit(tuple_ty, element_refs);
+}
- try sema.storePtr2(block, src, ptr, ptr_src, result.wrapped, src, .store);
- return result.overflowed;
+fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
+ if (ty.zigTypeTag() != .Vector) return val;
+ return Value.Tag.repeated.create(sema.arena, val);
}
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
@@ -13955,13 +14116,12 @@ fn analyzeArithmetic(
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
switch (zir_tag) {
.add => {
- // For integers:
+ // For integers:intAddSat
// If either of the operands are zero, then the other operand is
// returned, even if it is undefined.
// If either of the operands are undefined, it's a compile error
@@ -14056,7 +14216,7 @@ fn analyzeArithmetic(
const val = if (scalar_tag == .ComptimeInt)
try sema.intAdd(lhs_val, rhs_val, resolved_type)
else
- try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, target);
+ try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .add_sat };
@@ -14153,7 +14313,7 @@ fn analyzeArithmetic(
const val = if (scalar_tag == .ComptimeInt)
try sema.intSub(lhs_val, rhs_val, resolved_type)
else
- try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, target);
+ try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = rhs_src, .air_tag = .sub_sat };
@@ -14234,7 +14394,7 @@ fn analyzeArithmetic(
}
}
if (is_int) {
- const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, target);
+ const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(product, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, product, vector_index);
@@ -14243,7 +14403,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, sema.mod),
);
}
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
@@ -14287,7 +14447,7 @@ fn analyzeArithmetic(
}
return sema.addConstant(
resolved_type,
- try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, sema.mod),
);
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
@@ -14329,9 +14489,9 @@ fn analyzeArithmetic(
}
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intMul(rhs_val, resolved_type, sema.arena, target)
+ try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod)
else
- try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, target);
+ try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
@@ -15316,7 +15476,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
Value.makeBool(is_generic),
// is_noalias: bool,
Value.makeBool(is_noalias),
- // arg_type: ?type,
+ // type: ?type,
param_ty_val,
};
param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields);
@@ -15690,14 +15850,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace());
- const field_values = try sema.arena.create([5]Value);
+ const field_values = try sema.arena.create([4]Value);
field_values.* = .{
- // layout: ContainerLayout,
- try Value.Tag.enum_field_index.create(
- sema.arena,
- @enumToInt(std.builtin.Type.ContainerLayout.Auto),
- ),
-
// tag_type: type,
try Value.Tag.ty.create(sema.arena, int_tag_ty),
// fields: []const EnumField,
@@ -15767,7 +15921,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
union_field_fields.* = .{
// name: []const u8,
name_val,
- // field_type: type,
+ // type: type,
try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty),
// alignment: comptime_int,
try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment),
@@ -15880,7 +16034,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
struct_field_fields.* = .{
// name: []const u8,
name_val,
- // field_type: type,
+ // type: type,
try Value.Tag.ty.create(fields_anon_decl.arena(), field_ty),
// default_value: ?*const anyopaque,
try default_val_ptr.copy(fields_anon_decl.arena()),
@@ -15925,7 +16079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
struct_field_fields.* = .{
// name: []const u8,
name_val,
- // field_type: type,
+ // type: type,
try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty),
// default_value: ?*const anyopaque,
try default_val_ptr.copy(fields_anon_decl.arena()),
@@ -16137,14 +16291,6 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
return sema.addType(res_ty);
}
-fn zirLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
- const operand = try sema.resolveType(block, src, inst_data.operand);
- const res_ty = try sema.log2IntType(block, operand, src);
- return sema.addType(res_ty);
-}
-
fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
switch (operand.zigTypeTag()) {
.ComptimeInt => return Type.comptime_int,
@@ -16356,6 +16502,15 @@ fn finishCondBr(
return Air.indexToRef(block_inst);
}
+fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
+ switch (ty.zigTypeTag()) {
+ .Optional, .Null, .Undefined => return,
+ .Pointer => if (ty.isPtrLikeOptional()) return,
+ else => {},
+ }
+ return sema.failWithExpectedOptionalType(block, src, ty);
+}
+
fn zirIsNonNull(
sema: *Sema,
block: *Block,
@@ -16367,6 +16522,7 @@ fn zirIsNonNull(
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
+ try sema.checkNullableType(block, src, sema.typeOf(operand));
return sema.analyzeIsNull(block, src, operand, true);
}
@@ -16381,6 +16537,7 @@ fn zirIsNonNullPtr(
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ptr = try sema.resolveInst(inst_data.operand);
+ try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2());
if ((try sema.resolveMaybeUndefVal(ptr)) == null) {
return block.addUnOp(.is_non_null_ptr, ptr);
}
@@ -16388,12 +16545,23 @@ fn zirIsNonNullPtr(
return sema.analyzeIsNull(block, src, loaded, true);
}
+fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
+ switch (ty.zigTypeTag()) {
+ .ErrorSet, .ErrorUnion, .Undefined => return,
+ else => return sema.fail(block, src, "expected error union type, found '{}'", .{
+ ty.fmt(sema.mod),
+ }),
+ }
+}
+
fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
+ try sema.checkErrorType(block, src, sema.typeOf(operand));
return sema.analyzeIsNonErr(block, inst_data.src(), operand);
}
@@ -16404,6 +16572,7 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ptr = try sema.resolveInst(inst_data.operand);
+ try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2());
const loaded = try sema.analyzeLoad(block, src, ptr, src);
return sema.analyzeIsNonErr(block, src, loaded);
}
@@ -16942,24 +17111,6 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
};
}
-fn zirOverflowArithmeticPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const tracy = trace(@src());
- defer tracy.end();
-
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const elem_ty_src = inst_data.src();
- const elem_type = try sema.resolveType(block, elem_ty_src, inst_data.operand);
- const ty = try Type.ptr(sema.arena, sema.mod, .{
- .pointee_type = elem_type,
- .@"addrspace" = .generic,
- .mutable = true,
- .@"allowzero" = false,
- .@"volatile" = false,
- .size = .One,
- });
- return sema.addType(ty);
-}
-
fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -17906,7 +18057,7 @@ fn zirUnaryMath(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
- comptime eval: fn (Value, Type, Allocator, std.Target) Allocator.Error!Value,
+ comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -17915,7 +18066,6 @@ fn zirUnaryMath(
const operand = try sema.resolveInst(inst_data.operand);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = sema.typeOf(operand);
- const target = sema.mod.getTarget();
switch (operand_ty.zigTypeTag()) {
.ComptimeFloat, .Float => {},
@@ -17942,7 +18092,7 @@ fn zirUnaryMath(
const elems = try sema.arena.alloc(Value, vec_len);
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
- elem.* = try eval(elem_val, scalar_ty, sema.arena, target);
+ elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
}
return sema.addConstant(
result_ty,
@@ -17957,7 +18107,7 @@ fn zirUnaryMath(
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
if (operand_val.isUndef())
return sema.addConstUndef(operand_ty);
- const result_val = try eval(operand_val, operand_ty, sema.arena, target);
+ const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod);
return sema.addConstant(operand_ty, result_val);
}
@@ -18312,22 +18462,14 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
.Enum => {
const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
- // layout: ContainerLayout,
- const layout_val = struct_val[0];
// tag_type: type,
- const tag_type_val = struct_val[1];
+ const tag_type_val = struct_val[0];
// fields: []const EnumField,
- const fields_val = struct_val[2];
+ const fields_val = struct_val[1];
// decls: []const Declaration,
- const decls_val = struct_val[3];
+ const decls_val = struct_val[2];
// is_exhaustive: bool,
- const is_exhaustive_val = struct_val[4];
-
- // enum layout is always auto
- const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
- if (layout != .Auto) {
- return sema.fail(block, src, "reified enums must have a layout .Auto", .{});
- }
+ const is_exhaustive_val = struct_val[3];
// Decls
if (decls_val.sliceLen(mod) > 0) {
@@ -18574,8 +18716,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
// TODO use reflection instead of magic numbers here
// name: []const u8
const name_val = field_struct_val[0];
- // field_type: type,
- const field_type_val = field_struct_val[1];
+ // type: type,
+ const type_val = field_struct_val[1];
// alignment: comptime_int,
const alignment_val = field_struct_val[2];
@@ -18609,7 +18751,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
}
var buffer: Value.ToTypeBuffer = undefined;
- const field_ty = try field_type_val.toType(&buffer).copy(new_decl_arena_allocator);
+ const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?),
@@ -18730,7 +18872,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const arg_is_generic = arg_val[0].toBool();
// is_noalias: bool,
const arg_is_noalias = arg_val[1].toBool();
- // arg_type: ?type,
+ // type: ?type,
const param_type_opt_val = arg_val[2];
if (arg_is_generic) {
@@ -18828,9 +18970,9 @@ fn reifyStruct(
// TODO use reflection instead of magic numbers here
// name: []const u8
const name_val = field_struct_val[0];
- // field_type: type,
- const field_type_val = field_struct_val[1];
- //default_value: ?*const anyopaque,
+ // type: type,
+ const type_val = field_struct_val[1];
+ // default_value: ?*const anyopaque,
const default_value_val = field_struct_val[2];
// is_comptime: bool,
const is_comptime_val = field_struct_val[3];
@@ -18893,7 +19035,7 @@ fn reifyStruct(
}
var buffer: Value.ToTypeBuffer = undefined;
- const field_ty = try field_type_val.toType(&buffer).copy(new_decl_arena_allocator);
+ const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = abi_align,
@@ -19031,6 +19173,79 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
});
}
+fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
+ const va_list_ty = try sema.getBuiltinType("VaList");
+ const va_list_ptr = try Type.ptr(sema.arena, sema.mod, .{
+ .pointee_type = va_list_ty,
+ .mutable = true,
+ .@"addrspace" = .generic,
+ });
+
+ const inst = try sema.resolveInst(zir_ref);
+ return sema.coerce(block, va_list_ptr, inst, src);
+}
+
+fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
+ const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+ const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
+
+ const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.lhs);
+ const arg_ty = try sema.resolveType(block, ty_src, extra.rhs);
+
+ if (!try sema.validateExternType(arg_ty, .param_ty)) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.mod)});
+ errdefer msg.destroy(sema.gpa);
+
+ const src_decl = sema.mod.declPtr(block.src_decl);
+ try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), arg_ty, .param_ty);
+
+ try sema.addDeclaredHereNote(msg, arg_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addTyOp(.c_va_arg, arg_ty, va_list_ref);
+}
+
+fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
+ const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+
+ const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
+ const va_list_ty = try sema.getBuiltinType("VaList");
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
+}
+
+fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
+ const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+
+ const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addUnOp(.c_va_end, va_list_ref);
+}
+
+fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
+
+ const va_list_ty = try sema.getBuiltinType("VaList");
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addInst(.{
+ .tag = .c_va_start,
+ .data = .{ .ty = va_list_ty },
+ });
+}
+
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19114,8 +19329,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
_ = try sema.checkIntType(block, operand_src, operand_ty);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- const target = sema.mod.getTarget();
- const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, target, sema);
+ const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema);
return sema.addConstant(dest_ty, result_val);
} else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known");
@@ -19441,14 +19655,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (!is_vector) {
return sema.addConstant(
dest_ty,
- try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, target),
+ try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod),
);
}
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
- elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, target);
+ elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
}
return sema.addConstant(
dest_ty,
@@ -20186,8 +20400,13 @@ fn resolveExportOptions(
const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known");
const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage);
- const section = try sema.fieldVal(block, src, options, "section", section_src);
- const section_val = try sema.resolveConstValue(block, section_src, section, "linksection of exported value must be comptime-known");
+ const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
+ const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
+ const section_ty = Type.initTag(.const_slice_u8);
+ const section = if (section_opt_val.optionalValue()) |section_val|
+ try section_val.toAllocatedBytes(section_ty, sema.arena, sema.mod)
+ else
+ null;
const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src);
const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known");
@@ -20203,14 +20422,10 @@ fn resolveExportOptions(
});
}
- if (!section_val.isNull()) {
- return sema.fail(block, section_src, "TODO: implement exporting with linksection", .{});
- }
-
return std.builtin.ExportOptions{
.name = name,
.linkage = linkage,
- .section = null, // TODO
+ .section = section,
.visibility = visibility,
};
}
@@ -20417,13 +20632,13 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
while (i < vec_len) : (i += 1) {
const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf);
switch (operation) {
- .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, target),
- .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, target),
- .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, target),
+ .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod),
+ .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod),
+ .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod),
.Min => accum = accum.numberMin(elem_val, target),
.Max => accum = accum.numberMax(elem_val, target),
.Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
- .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, target),
+ .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod),
}
}
return sema.addConstant(scalar_ty, accum);
@@ -20819,10 +21034,10 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Xchg => operand_val,
.Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
.Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
- .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, target),
- .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, target),
- .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, target),
- .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, target),
+ .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, sema.mod),
+ .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, sema.mod),
+ .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, sema.mod),
+ .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, sema.mod),
.Max => stored_val.numberMax (operand_val, target),
.Min => stored_val.numberMin (operand_val, target),
// zig fmt: on
@@ -20895,8 +21110,6 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const mulend1 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend1), mulend1_src);
const mulend2 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend2), mulend2_src);
- const target = sema.mod.getTarget();
-
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1);
const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2);
const maybe_addend = try sema.resolveMaybeUndefVal(addend);
@@ -20912,7 +21125,7 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (maybe_addend) |addend_val| {
if (addend_val.isUndef()) return sema.addConstUndef(ty);
- const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, target);
+ const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod);
return sema.addConstant(ty, result_val);
} else {
break :rs addend_src;
@@ -21535,7 +21748,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
else => |e| return e,
};
break :blk cc_tv.val.toEnum(std.builtin.CallingConvention);
- } else std.builtin.CallingConvention.Unspecified;
+ } else if (sema.owner_decl.is_exported and has_body)
+ .C
+ else
+ .Unspecified;
const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
const body_len = sema.code.extra[extra_index];
@@ -24511,8 +24727,9 @@ fn coerceExtra(
else => break :p,
}
if (inst_info.size == .Slice) {
- if (dest_info.sentinel == null or inst_info.sentinel == null or
- !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod))
+ assert(dest_info.sentinel == null);
+ if (inst_info.sentinel == null or
+ !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod))
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -24721,7 +24938,7 @@ fn coerceExtra(
}
break :int;
};
- const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, target, sema);
+ const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema);
// TODO implement this compile error
//const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
//if (!int_again_val.eql(val, inst_ty, mod)) {
@@ -25952,6 +26169,30 @@ fn storePtr2(
try sema.requireRuntimeBlock(block, src, runtime_src);
try sema.queueFullTypeResolution(elem_ty);
+
+ if (ptr_ty.ptrInfo().data.vector_index == .runtime) {
+ const ptr_inst = Air.refToIndex(ptr).?;
+ const air_tags = sema.air_instructions.items(.tag);
+ if (air_tags[ptr_inst] == .ptr_elem_ptr) {
+ const ty_pl = sema.air_instructions.items(.data)[ptr_inst].ty_pl;
+ const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
+ _ = try block.addInst(.{
+ .tag = .vector_store_elem,
+ .data = .{ .vector_store_elem = .{
+ .vector_ptr = bin_op.lhs,
+ .payload = try block.sema.addExtra(Air.Bin{
+ .lhs = bin_op.rhs,
+ .rhs = operand,
+ }),
+ } },
+ });
+ return;
+ }
+ return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
+ ptr_ty.fmt(sema.mod),
+ });
+ }
+
if (is_ret) {
_ = try block.addBinOp(.store, ptr, operand);
} else {
@@ -27827,6 +28068,19 @@ fn analyzeLoad(
}
}
+ if (ptr_ty.ptrInfo().data.vector_index == .runtime) {
+ const ptr_inst = Air.refToIndex(ptr).?;
+ const air_tags = sema.air_instructions.items(.tag);
+ if (air_tags[ptr_inst] == .ptr_elem_ptr) {
+ const ty_pl = sema.air_instructions.items(.data)[ptr_inst].ty_pl;
+ const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
+ return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs);
+ }
+ return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
+ ptr_ty.fmt(sema.mod),
+ });
+ }
+
return block.addTyOp(.load, elem_ty, ptr);
}
@@ -28460,6 +28714,19 @@ fn cmpNumeric(
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
+ // Compare ints: const vs. undefined (or vice versa)
+ if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt() and rhs_val.isUndef()) {
+ try sema.resolveLazyValue(lhs_val);
+ if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| {
+ return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
+ }
+ } else if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt() and lhs_val.isUndef()) {
+ try sema.resolveLazyValue(rhs_val);
+ if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| {
+ return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
+ }
+ }
+
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(Type.bool);
}
@@ -28476,9 +28743,25 @@ fn cmpNumeric(
return Air.Inst.Ref.bool_false;
}
} else {
+ if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt()) {
+ // Compare ints: const vs. var
+ try sema.resolveLazyValue(lhs_val);
+ if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| {
+ return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
+ }
+ }
break :src rhs_src;
}
} else {
+ if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
+ if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt()) {
+ // Compare ints: var vs. const
+ try sema.resolveLazyValue(rhs_val);
+ if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| {
+ return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
+ }
+ }
+ }
break :src lhs_src;
}
};
@@ -28667,6 +28950,107 @@ fn cmpNumeric(
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
}
+/// Asserts that LHS value is an int or comptime int and not undefined, and that RHS type is an int.
+/// Given a const LHS and an unknown RHS, attempt to determine whether `op` has a guaranteed result.
+/// If it cannot be determined, returns null.
+/// Otherwise returns a bool for the guaranteed comparison operation.
+fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type) ?bool {
+ const rhs_info = rhs_ty.intInfo(target);
+ const vs_zero = lhs_val.orderAgainstZeroAdvanced(sema) catch unreachable;
+ const is_zero = vs_zero == .eq;
+ const is_negative = vs_zero == .lt;
+ const is_positive = vs_zero == .gt;
+
+ // Anything vs. zero-sized type has guaranteed outcome.
+ if (rhs_info.bits == 0) return switch (op) {
+ .eq, .lte, .gte => is_zero,
+ .neq, .lt, .gt => !is_zero,
+ };
+
+ // Special case for i1, which can only be 0 or -1.
+ // Zero and positive ints have guaranteed outcome.
+ if (rhs_info.bits == 1 and rhs_info.signedness == .signed) {
+ if (is_positive) return switch (op) {
+ .gt, .gte, .neq => true,
+ .lt, .lte, .eq => false,
+ };
+ if (is_zero) return switch (op) {
+ .gte => true,
+ .lt => false,
+ .gt, .lte, .eq, .neq => null,
+ };
+ }
+
+ // Negative vs. unsigned has guaranteed outcome.
+ if (rhs_info.signedness == .unsigned and is_negative) return switch (op) {
+ .eq, .gt, .gte => false,
+ .neq, .lt, .lte => true,
+ };
+
+ const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed);
+ const req_bits = lhs_val.intBitCountTwosComp(target) + sign_adj;
+
+ // No sized type can have more than 65535 bits.
+ // The RHS type operand is either a runtime value or sized (but undefined) constant.
+ if (req_bits > 65535) return switch (op) {
+ .lt, .lte => is_negative,
+ .gt, .gte => is_positive,
+ .eq => false,
+ .neq => true,
+ };
+ const fits = req_bits <= rhs_info.bits;
+
+ // Oversized int has guaranteed outcome.
+ switch (op) {
+ .eq => return if (!fits) false else null,
+ .neq => return if (!fits) true else null,
+ .lt, .lte => if (!fits) return is_negative,
+ .gt, .gte => if (!fits) return !is_negative,
+ }
+
+ // For any other comparison, we need to know if the LHS value is
+ // equal to the maximum or minimum possible value of the RHS type.
+ const edge: struct { min: bool, max: bool } = edge: {
+ if (is_zero and rhs_info.signedness == .unsigned) break :edge .{
+ .min = true,
+ .max = false,
+ };
+
+ if (req_bits != rhs_info.bits) break :edge .{
+ .min = false,
+ .max = false,
+ };
+
+ var ty_buffer: Type.Payload.Bits = .{
+ .base = .{ .tag = if (is_negative) .int_signed else .int_unsigned },
+ .data = @intCast(u16, req_bits),
+ };
+ const ty = Type.initPayload(&ty_buffer.base);
+ const pop_count = lhs_val.popCount(ty, target);
+
+ if (is_negative) {
+ break :edge .{
+ .min = pop_count == 1,
+ .max = false,
+ };
+ } else {
+ break :edge .{
+ .min = false,
+ .max = pop_count == req_bits - sign_adj,
+ };
+ }
+ };
+
+ assert(fits);
+ return switch (op) {
+ .lt => if (edge.max) false else null,
+ .lte => if (edge.min) true else null,
+ .gt => if (edge.min) false else null,
+ .gte => if (edge.max) true else null,
+ .eq, .neq => unreachable,
+ };
+}
+
/// Asserts that lhs and rhs types are both vectors.
fn cmpVector(
sema: *Sema,
@@ -29396,6 +29780,12 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void {
const field_ptr = val.castTag(.comptime_field_ptr).?.data;
return sema.resolveLazyValue(field_ptr.field_val);
},
+ .eu_payload,
+ .opt_payload,
+ => {
+ const sub_val = val.cast(Value.Payload.SubValue).?.data;
+ return sema.resolveLazyValue(sub_val);
+ },
.@"union" => {
const union_val = val.castTag(.@"union").?.data;
return sema.resolveLazyValue(union_val.val);
@@ -29432,6 +29822,18 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
const payload_ty = ty.errorUnionPayload();
return sema.resolveTypeLayout(payload_ty);
},
+ .Fn => {
+ const info = ty.fnInfo();
+ if (info.is_generic) {
+ // Resolving of generic function types is deferred to when
+ // the function is instantiated.
+ return;
+ }
+ for (info.param_types) |param_ty| {
+ try sema.resolveTypeLayout(param_ty);
+ }
+ try sema.resolveTypeLayout(info.return_type);
+ },
else => {},
}
}
@@ -29877,7 +30279,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
.Fn => {
const info = ty.fnInfo();
if (info.is_generic) {
- // Resolving of generic function types is defeerred to when
+ // Resolving of generic function types is deferred to when
// the function is instantiated.
return;
}
@@ -29981,6 +30383,18 @@ fn resolveTypeFieldsStruct(
ty: Type,
struct_obj: *Module.Struct,
) CompileError!void {
+ switch (sema.mod.declPtr(struct_obj.owner_decl).analysis) {
+ .file_failure,
+ .dependency_failure,
+ .sema_failure,
+ .sema_failure_retryable,
+ => {
+ sema.owner_decl.analysis = .dependency_failure;
+ sema.owner_decl.generation = sema.mod.generation;
+ return error.AnalysisFail;
+ },
+ else => {},
+ }
switch (struct_obj.status) {
.none => {},
.field_types_wip => {
@@ -30001,10 +30415,23 @@ fn resolveTypeFieldsStruct(
}
struct_obj.status = .field_types_wip;
+ errdefer struct_obj.status = .none;
try semaStructFields(sema.mod, struct_obj);
}
fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) CompileError!void {
+ switch (sema.mod.declPtr(union_obj.owner_decl).analysis) {
+ .file_failure,
+ .dependency_failure,
+ .sema_failure,
+ .sema_failure_retryable,
+ => {
+ sema.owner_decl.analysis = .dependency_failure;
+ sema.owner_decl.generation = sema.mod.generation;
+ return error.AnalysisFail;
+ },
+ else => {},
+ }
switch (union_obj.status) {
.none => {},
.field_types_wip => {
@@ -30025,6 +30452,7 @@ fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) Compi
}
union_obj.status = .field_types_wip;
+ errdefer union_obj.status = .none;
try semaUnionFields(sema.mod, union_obj);
union_obj.status = .have_field_types;
}
@@ -30089,9 +30517,7 @@ fn resolveInferredErrorSetTy(
fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
const gpa = mod.gpa;
const decl_index = struct_obj.owner_decl;
- const file_scope = struct_obj.namespace.file_scope;
- if (file_scope.status != .success_zir) return error.AnalysisFail;
- const zir = file_scope.zir;
+ const zir = struct_obj.namespace.file_scope.zir;
const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
assert(extended.opcode == .struct_decl);
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
@@ -31402,7 +31828,7 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, sema.air_extra.items.len);
inline for (fields) |field| {
- sema.air_extra.appendAssumeCapacity(switch (field.field_type) {
+ sema.air_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
Air.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
@@ -31987,7 +32413,11 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.intAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32021,7 +32451,11 @@ fn numberAddWrap(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.numberAddWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.numberAddWrapScalar(lhs_elem, rhs_elem, ty.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32058,7 +32492,11 @@ fn intSub(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.intSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32092,7 +32530,11 @@ fn numberSubWrap(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.numberSubWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.numberSubWrapScalar(lhs_elem, rhs_elem, ty.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32129,7 +32571,11 @@ fn floatAdd(
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32182,7 +32628,11 @@ fn floatSub(
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32236,12 +32686,16 @@ fn intSubWithOverflow(
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try sema.intSubWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
- overflowed_data[i] = of_math_result.overflowed;
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
+ overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
return Value.OverflowArithmeticResult{
- .overflowed = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
+ .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
};
}
@@ -32269,7 +32723,7 @@ fn intSubWithOverflowScalar(
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const wrapped_result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
return Value.OverflowArithmeticResult{
- .overflowed = Value.makeBool(overflowed),
+ .overflow_bit = Value.boolToInt(overflowed),
.wrapped_result = wrapped_result,
};
}
@@ -32286,7 +32740,9 @@ fn floatToInt(
const elem_ty = float_ty.childType();
const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.floatToIntScalar(block, src, val.indexVectorlike(i), elem_ty, int_ty.scalarType());
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
+ scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32519,7 +32975,7 @@ fn enumHasInt(
int: Value,
) CompileError!bool {
switch (ty.tag()) {
- .enum_nonexhaustive => return sema.intFitsInType(int, ty, null),
+ .enum_nonexhaustive => unreachable,
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
const tag_ty = enum_full.tag_ty;
@@ -32581,12 +33037,16 @@ fn intAddWithOverflow(
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try sema.intAddWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
- overflowed_data[i] = of_math_result.overflowed;
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
+ overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
return Value.OverflowArithmeticResult{
- .overflowed = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
+ .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
};
}
@@ -32614,7 +33074,7 @@ fn intAddWithOverflowScalar(
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
return Value.OverflowArithmeticResult{
- .overflowed = Value.makeBool(overflowed),
+ .overflow_bit = Value.boolToInt(overflowed),
.wrapped_result = result,
};
}
@@ -32633,7 +33093,11 @@ fn compareAll(
if (ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen()) : (i += 1) {
- if (!(try sema.compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType()))) {
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) {
return false;
}
}
@@ -32677,7 +33141,11 @@ fn compareVector(
assert(ty.zigTypeTag() == .Vector);
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const res_bool = try sema.compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType());
scalar.* = Value.makeBool(res_bool);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
@@ -32697,23 +33165,24 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
const target = sema.mod.getTarget();
const parent_ty = ptr_ty.childType();
+ const VI = Type.Payload.Pointer.Data.VectorIndex;
+
const vector_info: struct {
- host_size: u16,
- bit_offset: u16,
- alignment: u32,
- } = if (parent_ty.tag() == .vector) blk: {
+ host_size: u16 = 0,
+ alignment: u32 = 0,
+ vector_index: VI = .none,
+ } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: {
const elem_bits = elem_ty.bitSize(target);
- const is_packed = elem_bits != 0 and (elem_bits & (elem_bits - 1)) != 0;
- // TODO: runtime-known index
- assert(!is_packed or offset != null);
- const is_packed_with_offset = is_packed and offset != null and offset.? != 0;
- const target_offset = if (is_packed_with_offset) (if (target.cpu.arch.endian() == .Big) (parent_ty.vectorLen() - 1 - offset.?) else offset.?) else 0;
+ if (elem_bits == 0) break :blk .{};
+ const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
+ if (!is_packed) break :blk .{};
+
break :blk .{
- .host_size = if (is_packed_with_offset) @intCast(u16, parent_ty.abiSize(target)) else 0,
- .bit_offset = if (is_packed_with_offset) @intCast(u16, elem_bits * target_offset) else 0,
- .alignment = if (is_packed_with_offset) @intCast(u16, parent_ty.abiAlignment(target)) else 0,
+ .host_size = @intCast(u16, parent_ty.arrayLen()),
+ .alignment = @intCast(u16, parent_ty.abiAlignment(target)),
+ .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime,
};
- } else .{ .host_size = 0, .bit_offset = 0, .alignment = 0 };
+ } else .{};
const alignment: u32 = a: {
// Calculate the new pointer alignment.
@@ -32741,6 +33210,6 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
.@"volatile" = ptr_info.@"volatile",
.@"align" = alignment,
.host_size = vector_info.host_size,
- .bit_offset = vector_info.bit_offset,
+ .vector_index = vector_info.vector_index,
});
}
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 805448d540..6e096ee90a 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -225,9 +225,7 @@ pub fn print(
.one => return writer.writeAll("1"),
.void_value => return writer.writeAll("{}"),
.unreachable_value => return writer.writeAll("unreachable"),
- .the_only_possible_value => {
- val = ty.onePossibleValue().?;
- },
+ .the_only_possible_value => return writer.writeAll("0"),
.bool_true => return writer.writeAll("true"),
.bool_false => return writer.writeAll("false"),
.ty => return val.castTag(.ty).?.data.print(writer, mod),
diff --git a/src/Zir.zig b/src/Zir.zig
index 6e7164878c..ffe1f4c345 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -71,7 +71,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => code.extra[i],
Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
i32 => @bitCast(i32, code.extra[i]),
@@ -539,9 +539,6 @@ pub const Inst = struct {
/// Obtains the return type of the in-scope function.
/// Uses the `node` union field.
ret_type,
- /// Create a pointer type for overflow arithmetic.
- /// TODO remove when doing https://github.com/ziglang/zig/issues/10248
- overflow_arithmetic_ptr,
/// Create a pointer type which can have a sentinel, alignment, address space, and/or bit range.
/// Uses the `ptr_type` union field.
ptr_type,
@@ -600,9 +597,6 @@ pub const Inst = struct {
/// Returns the integer type for the RHS of a shift operation.
/// Uses the `un_node` field.
typeof_log2_int_type,
- /// Given an integer type, returns the integer type for the RHS of a shift operation.
- /// Uses the `un_node` field.
- log2_int_type,
/// Asserts control-flow will not reach this instruction (`unreachable`).
/// Uses the `unreachable` union field.
@"unreachable",
@@ -1121,7 +1115,6 @@ pub const Inst = struct {
.err_union_code,
.err_union_code_ptr,
.ptr_type,
- .overflow_arithmetic_ptr,
.enum_literal,
.merge_error_sets,
.error_union_type,
@@ -1132,7 +1125,6 @@ pub const Inst = struct {
.slice_sentinel,
.import,
.typeof_log2_int_type,
- .log2_int_type,
.resolve_inferred_alloc,
.set_eval_branch_quota,
.switch_capture,
@@ -1422,7 +1414,6 @@ pub const Inst = struct {
.err_union_code,
.err_union_code_ptr,
.ptr_type,
- .overflow_arithmetic_ptr,
.enum_literal,
.merge_error_sets,
.error_union_type,
@@ -1433,7 +1424,6 @@ pub const Inst = struct {
.slice_sentinel,
.import,
.typeof_log2_int_type,
- .log2_int_type,
.switch_capture,
.switch_capture_ref,
.switch_capture_multi,
@@ -1664,7 +1654,6 @@ pub const Inst = struct {
.ret_err_value_code = .str_tok,
.ret_ptr = .node,
.ret_type = .node,
- .overflow_arithmetic_ptr = .un_node,
.ptr_type = .ptr_type,
.slice_start = .pl_node,
.slice_end = .pl_node,
@@ -1678,7 +1667,6 @@ pub const Inst = struct {
.negate_wrap = .un_node,
.typeof = .un_node,
.typeof_log2_int_type = .un_node,
- .log2_int_type = .un_node,
.@"unreachable" = .@"unreachable",
.xor = .pl_node,
.optional_type = .un_node,
@@ -1916,19 +1904,19 @@ pub const Inst = struct {
/// The AST node is the builtin call.
typeof_peer,
/// Implements the `@addWithOverflow` builtin.
- /// `operand` is payload index to `OverflowArithmetic`.
+ /// `operand` is payload index to `BinNode`.
/// `small` is unused.
add_with_overflow,
/// Implements the `@subWithOverflow` builtin.
- /// `operand` is payload index to `OverflowArithmetic`.
+ /// `operand` is payload index to `BinNode`.
/// `small` is unused.
sub_with_overflow,
/// Implements the `@mulWithOverflow` builtin.
- /// `operand` is payload index to `OverflowArithmetic`.
+ /// `operand` is payload index to `BinNode`.
/// `small` is unused.
mul_with_overflow,
/// Implements the `@shlWithOverflow` builtin.
- /// `operand` is payload index to `OverflowArithmetic`.
+ /// `operand` is payload index to `BinNode`.
/// `small` is unused.
shl_with_overflow,
/// `operand` is payload index to `UnNode`.
@@ -1993,6 +1981,18 @@ pub const Inst = struct {
/// Implement the builtin `@addrSpaceCast`
/// `Operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
addrspace_cast,
+ /// Implement builtin `@cVaArg`.
+ /// `operand` is payload index to `BinNode`.
+ c_va_arg,
+ /// Implement builtin `@cVaStart`.
+ /// `operand` is payload index to `UnNode`.
+ c_va_copy,
+ /// Implement builtin `@cVaStart`.
+ /// `operand` is payload index to `UnNode`.
+ c_va_end,
+ /// Implement builtin `@cVaStart`.
+ /// `operand` is `src_node: i32`.
+ c_va_start,
pub const InstData = struct {
opcode: Extended,
@@ -3418,13 +3418,6 @@ pub const Inst = struct {
field_name: Ref,
};
- pub const OverflowArithmetic = struct {
- node: i32,
- lhs: Ref,
- rhs: Ref,
- ptr: Ref,
- };
-
pub const Cmpxchg = struct {
node: i32,
ptr: Ref,
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index e09664598a..b333ffc666 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -433,7 +433,7 @@ pub fn generate(
.prev_di_pc = 0,
.prev_di_line = module_fn.lbrace_line,
.prev_di_column = module_fn.lbrace_column,
- .stack_size = mem.alignForwardGeneric(u32, function.max_end_stack, function.stack_align),
+ .stack_size = function.max_end_stack,
.saved_regs_stack_space = function.saved_regs_stack_space,
};
defer emit.deinit();
@@ -477,7 +477,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, self.mir_extra.items.len);
inline for (fields) |field| {
- self.mir_extra.appendAssumeCapacity(switch (field.field_type) {
+ self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
@@ -560,6 +560,7 @@ fn gen(self: *Self) !void {
const total_stack_size = self.max_end_stack + self.saved_regs_stack_space;
const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align);
const stack_size = aligned_total_stack_end - self.saved_regs_stack_space;
+ self.max_end_stack = stack_size;
if (math.cast(u12, stack_size)) |size| {
self.mir_instructions.set(backpatch_reloc, .{
.tag = .sub_immediate,
@@ -873,6 +874,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+ .vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -976,11 +983,16 @@ fn allocMem(
assert(abi_size > 0);
assert(abi_align > 0);
- if (abi_align > self.stack_align)
- self.stack_align = abi_align;
+ // In order to efficiently load and store stack items that fit
+ // into registers, we bump up the alignment to the next power of
+ // two.
+ const adjusted_align = if (abi_size > 8)
+ abi_align
+ else
+ std.math.ceilPowerOfTwoAssert(u32, abi_size);
// TODO find a free slot instead of always appending
- const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
+ const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, adjusted_align) + abi_size;
self.next_stack_offset = offset;
self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
@@ -3044,19 +3056,60 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
}
/// Given an error union, returns the error
-fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
+fn errUnionErr(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) !MCValue {
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
return MCValue{ .immediate = 0 };
}
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- return error_union_mcv;
+ return try error_union_bind.resolveToMcv(self);
}
const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
- switch (error_union_mcv) {
- .register => return self.fail("TODO errUnionErr for registers", .{}),
+ switch (try error_union_bind.resolveToMcv(self)) {
+ .register => {
+ var operand_reg: Register = undefined;
+ var dest_reg: Register = undefined;
+
+ const read_args = [_]ReadArg{
+ .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = err_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ } else null,
+ );
+
+ const err_bit_offset = err_offset * 8;
+ const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+
+ _ = try self.addInst(.{
+ .tag = .ubfx, // errors are unsigned integers
+ .data = .{
+ .rr_lsb_width = .{
+ // Set both registers to the X variant to get the full width
+ .rd = dest_reg.toX(),
+ .rn = operand_reg.toX(),
+ .lsb = @intCast(u6, err_bit_offset),
+ .width = @intCast(u7, err_bit_size),
+ },
+ },
+ });
+
+ return MCValue{ .register = dest_reg };
+ },
.stack_argument_offset => |off| {
return MCValue{ .stack_argument_offset = off + err_offset };
},
@@ -3073,27 +3126,69 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const error_union_ty = self.air.typeOf(ty_op.operand);
- const mcv = try self.resolveInst(ty_op.operand);
- break :result try self.errUnionErr(mcv, error_union_ty);
+
+ break :result try self.errUnionErr(error_union_bind, error_union_ty, inst);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
/// Given an error union, returns the payload
-fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
+fn errUnionPayload(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) !MCValue {
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
- return error_union_mcv;
+ return try error_union_bind.resolveToMcv(self);
}
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
return MCValue.none;
}
const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
- switch (error_union_mcv) {
- .register => return self.fail("TODO errUnionPayload for registers", .{}),
+ switch (try error_union_bind.resolveToMcv(self)) {
+ .register => {
+ var operand_reg: Register = undefined;
+ var dest_reg: Register = undefined;
+
+ const read_args = [_]ReadArg{
+ .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = err_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ } else null,
+ );
+
+ const payload_bit_offset = payload_offset * 8;
+ const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+
+ _ = try self.addInst(.{
+ .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+ .data = .{
+ .rr_lsb_width = .{
+ // Set both registers to the X variant to get the full width
+ .rd = dest_reg.toX(),
+ .rn = operand_reg.toX(),
+ .lsb = @intCast(u5, payload_bit_offset),
+ .width = @intCast(u6, payload_bit_size),
+ },
+ },
+ });
+
+ return MCValue{ .register = dest_reg };
+ },
.stack_argument_offset => |off| {
return MCValue{ .stack_argument_offset = off + payload_offset };
},
@@ -3110,9 +3205,10 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const error_union_ty = self.air.typeOf(ty_op.operand);
- const error_union = try self.resolveInst(ty_op.operand);
- break :result try self.errUnionPayload(error_union, error_union_ty);
+
+ break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -3393,9 +3489,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch});
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
+ const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
+ const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
+
+ break :result try self.ptrElemVal(base_bind, index_bind, ptr_ty, inst);
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -4037,9 +4138,24 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- _ = extra;
- return self.fail("TODO implement codegen airFieldParentPtr", .{});
+ const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const field_ptr = try self.resolveInst(extra.field_ptr);
+ const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+ switch (field_ptr) {
+ .ptr_stack_offset => |off| {
+ break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
+ },
+ else => {
+ const lhs_bind: ReadArg.Bind = .{ .mcv = field_ptr };
+ const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = struct_field_offset } };
+
+ break :result try self.addSub(.sub, lhs_bind, rhs_bind, Type.usize, Type.usize, null);
+ },
+ }
+ };
+ return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none });
}
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
@@ -4048,7 +4164,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.air.typeOfIndex(inst);
const result = self.args[arg_index];
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
+ const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
const mcv = switch (result) {
// Copy registers to the stack
@@ -4771,19 +4888,27 @@ fn isNonNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue
return MCValue{ .compare_flags = is_null_res.compare_flags.negate() };
}
-fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- const error_type = ty.errorUnionSet();
+fn isErr(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+) !MCValue {
+ const error_type = error_union_ty.errorUnionSet();
if (error_type.errorSetIsEmpty()) {
return MCValue{ .immediate = 0 }; // always false
}
- const error_mcv = try self.errUnionErr(operand, ty);
+ const error_mcv = try self.errUnionErr(error_union_bind, error_union_ty, null);
return try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .gt);
}
-fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- const is_err_result = try self.isErr(ty, operand);
+fn isNonErr(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+) !MCValue {
+ const is_err_result = try self.isErr(error_union_bind, error_union_ty);
switch (is_err_result) {
.compare_flags => |cond| {
assert(cond == .hi);
@@ -4852,9 +4977,10 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isErr(ty, operand);
+ const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
+ const error_union_ty = self.air.typeOf(un_op);
+
+ break :result try self.isErr(error_union_bind, error_union_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4869,7 +4995,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
- break :result try self.isErr(elem_ty, operand);
+ break :result try self.isErr(.{ .mcv = operand }, elem_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4877,9 +5003,10 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNonErr(ty, operand);
+ const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
+ const error_union_ty = self.air.typeOf(un_op);
+
+ break :result try self.isNonErr(error_union_bind, error_union_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4894,7 +5021,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
- break :result try self.isNonErr(elem_ty, operand);
+ break :result try self.isNonErr(.{ .mcv = operand }, elem_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -5275,7 +5402,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const abi_size = ty.abiSize(self.target.*);
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5283,7 +5410,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(self.target.*)) {
+ switch (abi_size) {
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -5305,6 +5432,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.register => |reg| {
switch (abi_size) {
1, 2, 4, 8 => {
+ assert(std.mem.isAlignedGeneric(u32, stack_offset, abi_size));
+
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb_stack,
2 => .strh_stack,
@@ -5317,7 +5446,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = rt,
- .offset = @intCast(u32, stack_offset),
+ .offset = stack_offset,
} },
});
},
@@ -5880,9 +6009,10 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
}
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
- const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
}
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
@@ -5939,15 +6069,24 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const result: MCValue = result: {
+ const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.air.typeOf(pl_op.operand);
- const error_union = try self.resolveInst(pl_op.operand);
- const is_err_result = try self.isErr(error_union_ty, error_union);
+ const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
+ const error_union_align = error_union_ty.abiAlignment(self.target.*);
+
+ // The error union will die in the body. However, we need the
+ // error union after the body in order to extract the payload
+ // of the error union, so we create a copy of it
+ const error_union_copy = try self.allocMem(error_union_size, error_union_align, null);
+ try self.genSetStack(error_union_ty, error_union_copy, try error_union_bind.resolveToMcv(self));
+
+ const is_err_result = try self.isErr(error_union_bind, error_union_ty);
const reloc = try self.condBr(is_err_result);
try self.genBody(body);
-
try self.performReloc(reloc);
- break :result try self.errUnionPayload(error_union, error_union_ty);
+
+ break :result try self.errUnionPayload(.{ .mcv = .{ .stack_offset = error_union_copy } }, error_union_ty, null);
};
return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
}
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index e5541a5988..cc478c874a 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -505,7 +505,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32 => @bitCast(i32, mir.extra[i]),
else => @compileError("bad field type"),
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 9b5cacc98e..0c48f33ea1 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -136,19 +136,10 @@ pub fn getFloatArrayType(ty: Type) ?Type {
}
}
-const callee_preserved_regs_impl = if (builtin.os.tag.isDarwin()) struct {
- pub const callee_preserved_regs = [_]Register{
- .x20, .x21, .x22, .x23,
- .x24, .x25, .x26, .x27,
- .x28,
- };
-} else struct {
- pub const callee_preserved_regs = [_]Register{
- .x19, .x20, .x21, .x22, .x23,
- .x24, .x25, .x26, .x27, .x28,
- };
+pub const callee_preserved_regs = [_]Register{
+ .x19, .x20, .x21, .x22, .x23,
+ .x24, .x25, .x26, .x27, .x28,
};
-pub const callee_preserved_regs = callee_preserved_regs_impl.callee_preserved_regs;
pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
@@ -161,10 +152,10 @@ const RegisterBitSet = RegisterManager.RegisterBitSet;
pub const RegisterClass = struct {
pub const gp: RegisterBitSet = blk: {
var set = RegisterBitSet.initEmpty();
- set.setRangeValue(.{
- .start = 0,
- .end = callee_preserved_regs.len,
- }, true);
+ for (callee_preserved_regs) |reg| {
+ const index = RegisterManager.indexOfRegIntoTracked(reg).?;
+ set.set(index);
+ }
break :blk set;
};
};
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index 20b33c01c5..0baa7e65ce 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -4,13 +4,15 @@ const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
+/// Disjoint sets of registers. Every register must belong to
+/// exactly one register class.
pub const RegisterClass = enum {
general_purpose,
stack_pointer,
floating_point,
};
-/// General purpose registers in the AArch64 instruction set
+/// Registers in the AArch64 instruction set
pub const Register = enum(u8) {
// zig fmt: off
// 64-bit general-purpose registers
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 8197d85b48..73c97fb7e5 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -386,7 +386,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, self.mir_extra.items.len);
inline for (fields) |field| {
- self.mir_extra.appendAssumeCapacity(switch (field.field_type) {
+ self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
@@ -783,6 +783,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+ .vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -4031,8 +4037,9 @@ fn genInlineMemsetCode(
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, arg_index: u32) error{OutOfMemory}!void {
const mcv = self.args[arg_index];
- const ty = self.air.instructions.items(.data)[inst].ty;
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
+ const arg = self.air.instructions.items(.data)[inst].arg;
+ const ty = self.air.getRefType(arg.ty);
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index);
switch (self.debug_output) {
.dwarf => |dw| {
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index 3478d8dc58..07a8384c2c 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -283,7 +283,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32 => @bitCast(i32, mir.extra[i]),
else => @compileError("bad field type"),
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 58661926bb..2c63f171ad 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -340,7 +340,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, self.mir_extra.items.len);
inline for (fields) |field| {
- self.mir_extra.appendAssumeCapacity(switch (field.field_type) {
+ self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
@@ -697,6 +697,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+ .vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -1602,9 +1608,10 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement codegen airFieldParentPtr", .{});
}
-fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue, arg_index: u32) !void {
- const ty = self.air.instructions.items(.data)[inst].ty;
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
+fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
+ const arg = self.air.instructions.items(.data)[inst].arg;
+ const ty = self.air.getRefType(arg.ty);
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index);
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
@@ -1634,7 +1641,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
// TODO support stack-only arguments
// TODO Copy registers to the stack
const mcv = result;
- try self.genArgDbgInfo(inst, mcv, @intCast(u32, arg_index));
+ try self.genArgDbgInfo(inst, mcv);
if (self.liveness.isUnused(inst))
return self.finishAirBookkeeping();
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 5df3a86229..97accb7642 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -132,7 +132,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32 => @bitCast(i32, mir.extra[i]),
else => @compileError("bad field type"),
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 6eac6ba4fc..943d21c47b 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -714,6 +714,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => @panic("TODO implement is_named_enum_value"),
.error_set_has_value => @panic("TODO implement error_set_has_value"),
+ .vector_store_elem => @panic("TODO implement vector_store_elem"),
+
+ .c_va_arg => @panic("TODO implement c_va_arg"),
+ .c_va_copy => @panic("TODO implement c_va_copy"),
+ .c_va_end => @panic("TODO implement c_va_end"),
+ .c_va_start => @panic("TODO implement c_va_start"),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -1010,7 +1016,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
}
};
- try self.genArgDbgInfo(inst, mcv, @intCast(u32, arg_index));
+ try self.genArgDbgInfo(inst, mcv);
if (self.liveness.isUnused(inst))
return self.finishAirBookkeeping();
@@ -3401,9 +3407,10 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
self.finishAirBookkeeping();
}
-fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue, arg_index: u32) !void {
- const ty = self.air.instructions.items(.data)[inst].ty;
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
+fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
+ const arg = self.air.instructions.items(.data)[inst].arg;
+ const ty = self.air.getRefType(arg.ty);
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index);
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig
index 744bd84943..f854152a2f 100644
--- a/src/arch/sparc64/Mir.zig
+++ b/src/arch/sparc64/Mir.zig
@@ -347,7 +347,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32 => @bitCast(i32, mir.extra[i]),
else => @compileError("bad field type"),
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 99c0facb1a..c27639e14a 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -905,7 +905,9 @@ fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
}
fn addExtended(func: *CodeGen, opcode: wasm.PrefixedOpcode) error{OutOfMemory}!void {
- try func.addInst(.{ .tag = .extended, .secondary = @enumToInt(opcode), .data = .{ .tag = {} } });
+ const extra_index = @intCast(u32, func.mir_extra.items.len);
+ try func.mir_extra.append(func.gpa, @enumToInt(opcode));
+ try func.addInst(.{ .tag = .extended, .data = .{ .payload = extra_index } });
}
fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
@@ -960,7 +962,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, func.mir_extra.items.len);
inline for (fields) |field| {
- func.mir_extra.appendAssumeCapacity(switch (field.field_type) {
+ func.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
});
@@ -1971,6 +1973,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.is_named_enum_value,
.error_set_has_value,
.addrspace_cast,
+ .vector_store_elem,
+ .c_va_arg,
+ .c_va_copy,
+ .c_va_end,
+ .c_va_start,
=> |tag| return func.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
.add_optimized,
@@ -2213,6 +2220,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ptr_ty = func.air.typeOf(bin_op.lhs);
const ptr_info = ptr_ty.ptrInfo().data;
const ty = ptr_ty.childType();
+
if (ptr_info.host_size == 0) {
try func.store(lhs, rhs, ty, 0);
} else {
@@ -2466,8 +2474,8 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (func.debug_output) {
.dwarf => |dwarf| {
- // TODO: Get the original arg index rather than wasm arg index
- const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, arg_index);
+ const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
+ const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{
.wasm_local = arg.local.value,
});
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 45e02e0986..71d21d2797 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -423,9 +423,40 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
}
fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void {
- const opcode = emit.mir.instructions.items(.secondary)[inst];
+ const extra_index = emit.mir.instructions.items(.data)[inst].payload;
+ const opcode = emit.mir.extra[extra_index];
+ const writer = emit.code.writer();
+ try emit.code.append(0xFC);
+ try leb128.writeULEB128(writer, opcode);
switch (@intToEnum(std.wasm.PrefixedOpcode, opcode)) {
- .memory_fill => try emit.emitMemFill(),
+ // bulk-memory opcodes
+ .data_drop => {
+ const segment = emit.mir.extra[extra_index + 1];
+ try leb128.writeULEB128(writer, segment);
+ },
+ .memory_init => {
+ const segment = emit.mir.extra[extra_index + 1];
+ try leb128.writeULEB128(writer, segment);
+ try leb128.writeULEB128(writer, @as(u32, 0)); // memory index
+ },
+ .memory_fill => {
+ try leb128.writeULEB128(writer, @as(u32, 0)); // memory index
+ },
+ .memory_copy => {
+ try leb128.writeULEB128(writer, @as(u32, 0)); // dst memory index
+ try leb128.writeULEB128(writer, @as(u32, 0)); // src memory index
+ },
+
+ // nontrapping-float-to-int-conversion opcodes
+ .i32_trunc_sat_f32_s,
+ .i32_trunc_sat_f32_u,
+ .i32_trunc_sat_f64_s,
+ .i32_trunc_sat_f64_u,
+ .i64_trunc_sat_f32_s,
+ .i64_trunc_sat_f32_u,
+ .i64_trunc_sat_f64_s,
+ .i64_trunc_sat_f64_u,
+ => {}, // opcode already written
else => |tag| return emit.fail("TODO: Implement extension instruction: {s}\n", .{@tagName(tag)}),
}
}
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 0f33dd9350..2d59c09e18 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -19,9 +19,6 @@ extra: []const u32,
pub const Inst = struct {
/// The opcode that represents this instruction
tag: Tag,
- /// This opcode will be set when `tag` represents an extended
- /// instruction with prefix 0xFC, or a simd instruction with prefix 0xFD.
- secondary: u8 = 0,
/// Data is determined by the set `tag`.
/// For example, `data` will be an i32 for when `tag` is 'i32_const'.
data: Data,
@@ -513,10 +510,11 @@ pub const Inst = struct {
i64_extend16_s = 0xC3,
/// Uses `tag`
i64_extend32_s = 0xC4,
- /// The instruction consists of an extension opcode
- /// set in `secondary`
+ /// The instruction consists of an extension opcode.
+ /// The prefixed opcode can be found at payload's index.
///
- /// The `data` field depends on the extension instruction
+ /// The `data` field depends on the extension instruction and
+ /// may contain additional data.
extended = 0xFC,
/// The instruction consists of a simd opcode.
/// The actual simd-opcode is found at payload's index.
@@ -589,7 +587,7 @@ pub fn extraData(self: *const Mir, comptime T: type, index: usize) struct { data
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => self.extra[i],
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
};
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index bfb60bf74a..2ad31bf7ba 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -374,7 +374,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, self.mir_extra.items.len);
inline for (fields) |field| {
- self.mir_extra.appendAssumeCapacity(switch (field.field_type) {
+ self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
@@ -785,6 +785,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+ .vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -3793,7 +3799,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.air.typeOfIndex(inst);
const mcv = self.args[arg_index];
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
+ const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
if (self.liveness.isUnused(inst))
return self.finishAirBookkeeping();
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 0bc5fd0b04..af3ed5e053 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -1593,41 +1593,41 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
.jnae => if (is_one_byte) OpCode.init(&.{0x72}) else OpCode.init(&.{0x0f,0x82}),
.jnb,
- .jnc,
+ .jnc,
.jae => if (is_one_byte) OpCode.init(&.{0x73}) else OpCode.init(&.{0x0f,0x83}),
- .je,
+ .je,
.jz => if (is_one_byte) OpCode.init(&.{0x74}) else OpCode.init(&.{0x0f,0x84}),
- .jne,
+ .jne,
.jnz => if (is_one_byte) OpCode.init(&.{0x75}) else OpCode.init(&.{0x0f,0x85}),
- .jna,
+ .jna,
.jbe => if (is_one_byte) OpCode.init(&.{0x76}) else OpCode.init(&.{0x0f,0x86}),
- .jnbe,
+ .jnbe,
.ja => if (is_one_byte) OpCode.init(&.{0x77}) else OpCode.init(&.{0x0f,0x87}),
.js => if (is_one_byte) OpCode.init(&.{0x78}) else OpCode.init(&.{0x0f,0x88}),
.jns => if (is_one_byte) OpCode.init(&.{0x79}) else OpCode.init(&.{0x0f,0x89}),
- .jpe,
+ .jpe,
.jp => if (is_one_byte) OpCode.init(&.{0x7a}) else OpCode.init(&.{0x0f,0x8a}),
- .jpo,
+ .jpo,
.jnp => if (is_one_byte) OpCode.init(&.{0x7b}) else OpCode.init(&.{0x0f,0x8b}),
- .jnge,
+ .jnge,
.jl => if (is_one_byte) OpCode.init(&.{0x7c}) else OpCode.init(&.{0x0f,0x8c}),
- .jge,
+ .jge,
.jnl => if (is_one_byte) OpCode.init(&.{0x7d}) else OpCode.init(&.{0x0f,0x8d}),
- .jle,
+ .jle,
.jng => if (is_one_byte) OpCode.init(&.{0x7e}) else OpCode.init(&.{0x0f,0x8e}),
- .jg,
+ .jg,
.jnle => if (is_one_byte) OpCode.init(&.{0x7f}) else OpCode.init(&.{0x0f,0x8f}),
else => unreachable,
@@ -1667,10 +1667,10 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
.setp,
.setpe => OpCode.init(&.{0x0f,0x9a}),
- .setnp,
+ .setnp,
.setpo => OpCode.init(&.{0x0f,0x9b}),
- .setl,
+ .setl,
.setnge => OpCode.init(&.{0x0f,0x9c}),
.setnl,
@@ -1778,7 +1778,7 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
.cmovbe,
.cmovna, => OpCode.init(&.{0x0f,0x46}),
- .cmove,
+ .cmove,
.cmovz, => OpCode.init(&.{0x0f,0x44}),
.cmovg,
@@ -1840,7 +1840,7 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
else => unreachable,
},
.vm => return switch (tag) {
- .vmovsd,
+ .vmovsd,
.vmovss => OpCode.init(&.{0x10}),
.vucomisd,
.vucomiss => OpCode.init(&.{0x2e}),
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 5bd8dc68f6..df2052ca6e 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -612,7 +612,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
+ @field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32 => @bitCast(i32, mir.extra[i]),
else => @compileError("bad field type"),
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 393d4db3d5..54c08e4aa9 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -143,7 +143,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
.integer, .integer, .integer, .integer,
.integer, .integer, .integer, .integer,
};
- if (has_avx512 and bit_size <= 256) return .{
+ const has_avx = target.cpu.features.isEnabled(@enumToInt(std.Target.x86.Feature.avx));
+ if (has_avx and bit_size <= 256) return .{
.integer, .integer, .integer, .integer,
.none, .none, .none, .none,
};
diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig
index 995ef353fa..6db3489a47 100644
--- a/src/clang_options_data.zig
+++ b/src/clang_options_data.zig
@@ -3932,7 +3932,14 @@ sepd1("include-pch"),
flagpd1("index-header-map"),
sepd1("init"),
flagpd1("init-only"),
-sepd1("install_name"),
+.{
+ .name = "install_name",
+ .syntax = .separate,
+ .zig_equivalent = .install_name,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
flagpd1("keep_private_externs"),
sepd1("lazy_framework"),
sepd1("lazy_library"),
@@ -6326,7 +6333,14 @@ joinpd1("ftabstop="),
jspd1("idirafter"),
joinpd1("mregparm="),
joinpd1("sycl-std="),
-jspd1("undefined"),
+.{
+ .name = "undefined",
+ .syntax = .joined_or_separate,
+ .zig_equivalent = .undefined,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
.{
.name = "extdirs=",
.syntax = .joined,
diff --git a/src/codegen.zig b/src/codegen.zig
index bc50f36041..e8dd661684 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -854,6 +854,14 @@ pub fn generateSymbol(
}
return Result{ .appended = {} };
},
+ .str_lit => {
+ const str_lit = typed_value.val.castTag(.str_lit).?.data;
+ const mod = bin_file.options.module.?;
+ const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+ try code.ensureUnusedCapacity(str_lit.len);
+ code.appendSliceAssumeCapacity(bytes);
+ return Result{ .appended = {} };
+ },
else => unreachable,
},
else => |t| {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 55b9cd4b77..c1adbfe6cf 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -90,7 +90,15 @@ const FormatTypeAsCIdentContext = struct {
const ValueRenderLocation = enum {
FunctionArgument,
Initializer,
+ StaticInitializer,
Other,
+
+ pub fn isInitializer(self: ValueRenderLocation) bool {
+ return switch (self) {
+ .Initializer, .StaticInitializer => true,
+ else => false,
+ };
+ }
};
const BuiltinInfo = enum {
@@ -312,7 +320,7 @@ pub const Function = struct {
try writer.writeAll("static ");
try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .Const, alignment, .Complete);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, ty, val, .Initializer);
+ try f.object.dg.renderValue(writer, ty, val, .StaticInitializer);
try writer.writeAll(";\n ");
break :result decl_c_value;
} else CValue{ .constant = inst };
@@ -431,6 +439,10 @@ pub const Function = struct {
return f.object.dg.renderTypecast(w, t);
}
+ fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, src_ty: Type, location: ValueRenderLocation) !void {
+ return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src } }, src_ty, location);
+ }
+
fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) {
return f.object.dg.fmtIntLiteral(ty, val);
}
@@ -502,6 +514,7 @@ pub const DeclGen = struct {
ty: Type,
val: Value,
decl_index: Decl.Index,
+ location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
const decl = dg.module.declPtr(decl_index);
assert(decl.has_tv);
@@ -515,12 +528,16 @@ pub const DeclGen = struct {
inline for (.{ .function, .extern_fn }) |tag|
if (decl.val.castTag(tag)) |func|
if (func.data.owner_decl != decl_index)
- return dg.renderDeclValue(writer, ty, val, func.data.owner_decl);
+ return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location);
if (ty.isSlice()) {
- try writer.writeByte('(');
- try dg.renderTypecast(writer, ty);
- try writer.writeAll("){ .ptr = ");
+ if (location == .StaticInitializer) {
+ try writer.writeByte('{');
+ } else {
+ try writer.writeByte('(');
+ try dg.renderTypecast(writer, ty);
+ try writer.writeAll("){ .ptr = ");
+ }
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr(), .Initializer);
@@ -530,7 +547,12 @@ pub const DeclGen = struct {
.data = val.sliceLen(dg.module),
};
const len_val = Value.initPayload(&len_pl.base);
- return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+
+ if (location == .StaticInitializer) {
+ return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ } else {
+ return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ }
}
// We shouldn't cast C function pointers as this is UB (when you call
@@ -552,7 +574,7 @@ pub const DeclGen = struct {
// that its contents are defined with respect to.
//
// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr
- fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type) error{ OutOfMemory, AnalysisFail }!void {
+ fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void {
if (!ptr_ty.isSlice()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ptr_ty);
@@ -567,7 +589,7 @@ pub const DeclGen = struct {
.variable => ptr_val.castTag(.variable).?.data.owner_decl,
else => unreachable,
};
- try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index);
+ try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location);
},
.field_ptr => {
const ptr_info = ptr_ty.ptrInfo();
@@ -605,7 +627,7 @@ pub const DeclGen = struct {
try writer.writeAll("&((");
try dg.renderTypecast(writer, u8_ptr_ty);
try writer.writeByte(')');
- try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty);
+ try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location);
return writer.print(")[{}]", .{try dg.fmtIntLiteral(Type.usize, byte_offset_val)});
} else {
var host_pl = Type.Payload.Bits{
@@ -617,7 +639,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderTypecast(writer, ptr_ty);
try writer.writeByte(')');
- return dg.renderParentPtr(writer, field_ptr.container_ptr, host_ty);
+ return dg.renderParentPtr(writer, field_ptr.container_ptr, host_ty, location);
},
},
.Union => switch (container_ty.containerLayout()) {
@@ -626,7 +648,7 @@ pub const DeclGen = struct {
.ty = container_ty.unionFields().values()[index].ty,
},
.Packed => {
- return dg.renderParentPtr(writer, field_ptr.container_ptr, ptr_ty);
+ return dg.renderParentPtr(writer, field_ptr.container_ptr, ptr_ty, location);
},
},
.Pointer => field_info: {
@@ -645,7 +667,7 @@ pub const DeclGen = struct {
try dg.renderType(std.io.null_writer, field_ptr.container_ty, .Complete);
try writer.writeAll("&(");
- try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty);
+ try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location);
try writer.writeAll(")->");
switch (field_ptr.container_ty.tag()) {
.union_tagged, .union_safety_tagged => try writer.writeAll("payload."),
@@ -653,7 +675,7 @@ pub const DeclGen = struct {
}
try writer.print("{ }", .{fmtIdent(field_info.name)});
} else {
- try dg.renderParentPtr(writer, field_ptr.container_ptr, field_info.ty);
+ try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location);
}
},
.elem_ptr => {
@@ -665,7 +687,7 @@ pub const DeclGen = struct {
const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
try writer.writeAll("&(");
- try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty);
+ try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location);
try writer.print(")[{d}]", .{elem_ptr.index});
},
.opt_payload_ptr, .eu_payload_ptr => {
@@ -680,7 +702,7 @@ pub const DeclGen = struct {
try dg.renderType(std.io.null_writer, payload_ptr.container_ty, .Complete);
try writer.writeAll("&(");
- try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty);
+ try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location);
try writer.writeAll(")->payload");
},
else => unreachable,
@@ -699,6 +721,10 @@ pub const DeclGen = struct {
val = rt.data;
}
const target = dg.module.getTarget();
+ const initializer_type: ValueRenderLocation = switch (location) {
+ .StaticInitializer => .StaticInitializer,
+ else => .Initializer,
+ };
const safety_on = switch (dg.module.optimizeMode()) {
.Debug, .ReleaseSafe => true,
@@ -714,15 +740,15 @@ pub const DeclGen = struct {
return writer.writeAll("false");
}
},
- .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val)}),
+ .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
.Float => {
const bits = ty.floatBits(target);
var int_pl = Type.Payload.Bits{ .base = .{ .tag = .int_signed }, .data = bits };
const int_ty = Type.initPayload(&int_pl.base);
- try writer.writeByte('(');
- try dg.renderTypecast(writer, ty);
- try writer.writeAll(")zig_as_");
+ try writer.writeAll("zig_cast_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeAll(" zig_as_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
switch (bits) {
@@ -738,7 +764,7 @@ pub const DeclGen = struct {
return writer.writeByte(')');
},
.Pointer => if (ty.isSlice()) {
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -766,21 +792,21 @@ pub const DeclGen = struct {
return dg.renderValue(writer, payload_ty, val, location);
}
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
}
try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, val, .Initializer);
+ try dg.renderValue(writer, payload_ty, val, initializer_type);
try writer.writeAll(", .is_null = ");
- try dg.renderValue(writer, Type.bool, val, .Initializer);
+ try dg.renderValue(writer, Type.bool, val, initializer_type);
return writer.writeAll(" }");
},
.Struct => switch (ty.containerLayout()) {
.Auto, .Extern => {
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -792,7 +818,7 @@ pub const DeclGen = struct {
if (!field.ty.hasRuntimeBits()) continue;
if (!empty) try writer.writeByte(',');
- try dg.renderValue(writer, field.ty, val, .Initializer);
+ try dg.renderValue(writer, field.ty, val, initializer_type);
empty = false;
}
@@ -802,7 +828,7 @@ pub const DeclGen = struct {
.Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef)}),
},
.Union => {
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -813,34 +839,34 @@ pub const DeclGen = struct {
const layout = ty.unionGetLayout(target);
if (layout.tag_size != 0) {
try writer.writeAll(" .tag = ");
- try dg.renderValue(writer, tag_ty, val, .Initializer);
+ try dg.renderValue(writer, tag_ty, val, initializer_type);
try writer.writeByte(',');
}
try writer.writeAll(" .payload = {");
}
for (ty.unionFields().values()) |field| {
if (!field.ty.hasRuntimeBits()) continue;
- try dg.renderValue(writer, field.ty, val, .Initializer);
+ try dg.renderValue(writer, field.ty, val, initializer_type);
break;
} else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef)});
if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
return writer.writeByte('}');
},
.ErrorUnion => {
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
}
try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, ty.errorUnionPayload(), val, .Initializer);
+ try dg.renderValue(writer, ty.errorUnionPayload(), val, initializer_type);
return writer.print(", .error = {x} }}", .{
try dg.fmtIntLiteral(ty.errorUnionSet(), val),
});
},
.Array, .Vector => {
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -848,19 +874,20 @@ pub const DeclGen = struct {
const ai = ty.arrayInfo();
if (ai.elem_type.eql(Type.u8, dg.module)) {
- try writer.writeByte('"');
+ var literal = stringLiteral(writer);
+ try literal.start();
const c_len = ty.arrayLenIncludingSentinel();
var index: usize = 0;
while (index < c_len) : (index += 1)
- try writeStringLiteralChar(writer, 0xaa);
- return writer.writeByte('"');
+ try literal.writeChar(0xaa);
+ return literal.end();
} else {
try writer.writeByte('{');
const c_len = ty.arrayLenIncludingSentinel();
var index: usize = 0;
while (index < c_len) : (index += 1) {
if (index > 0) try writer.writeAll(", ");
- try dg.renderValue(writer, ty.childType(), val, .Initializer);
+ try dg.renderValue(writer, ty.childType(), val, initializer_type);
}
return writer.writeByte('}');
}
@@ -893,8 +920,8 @@ pub const DeclGen = struct {
.eu_payload_ptr,
.decl_ref_mut,
.decl_ref,
- => try dg.renderParentPtr(writer, val, ty),
- else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val)}),
+ => try dg.renderParentPtr(writer, val, ty, location),
+ else => try writer.print("{}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
},
.Float => {
const bits = ty.floatBits(target);
@@ -926,9 +953,10 @@ pub const DeclGen = struct {
};
const int_val = Value.initPayload(&int_val_pl.base);
- try writer.writeByte('(');
- try dg.renderTypecast(writer, ty);
- try writer.writeByte(')');
+ try writer.writeAll("zig_cast_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte(' ');
+ var empty = true;
if (std.math.isFinite(f128_val)) {
try writer.writeAll("zig_as_");
try dg.renderTypeForBuiltinFnName(writer, ty);
@@ -941,17 +969,32 @@ pub const DeclGen = struct {
128 => try writer.print("{x}", .{f128_val}),
else => unreachable,
}
+ try writer.writeAll(", ");
+ empty = false;
} else {
- const operation = if (std.math.isSignalNan(f128_val))
- "nans"
- else if (std.math.isNan(f128_val))
+ // isSignalNan is equivalent to isNan currently, and MSVC doens't have nans, so prefer nan
+ const operation = if (std.math.isNan(f128_val))
"nan"
+ else if (std.math.isSignalNan(f128_val))
+ "nans"
else if (std.math.isInf(f128_val))
"inf"
else
unreachable;
+ if (location == .StaticInitializer) {
+ if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val))
+ return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{});
+
+ // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression
+
+ // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly
+ // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128)
+ // return dg.fail("Only quiet nans are supported in global variable initializers", .{});
+ }
+
try writer.writeAll("zig_as_special_");
+ if (location == .StaticInitializer) try writer.writeAll("constant_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
if (std.math.signbit(f128_val)) try writer.writeByte('-');
@@ -968,8 +1011,12 @@ pub const DeclGen = struct {
128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
else => unreachable,
};
+ try writer.writeAll(", ");
+ empty = false;
}
- return writer.print(", {x})", .{try dg.fmtIntLiteral(int_ty, int_val)});
+ try writer.print("{x}", .{try dg.fmtIntLiteralLoc(int_ty, int_val, location)});
+ if (!empty) try writer.writeByte(')');
+ return;
},
.Pointer => switch (val.tag()) {
.null_value, .zero => if (ty.isSlice()) {
@@ -987,10 +1034,10 @@ pub const DeclGen = struct {
},
.variable => {
const decl = val.castTag(.variable).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl);
+ return dg.renderDeclValue(writer, ty, val, decl, location);
},
.slice => {
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -1000,9 +1047,9 @@ pub const DeclGen = struct {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
try writer.writeByte('{');
- try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, .Initializer);
+ try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type);
try writer.writeAll(", ");
- try dg.renderValue(writer, Type.usize, slice.len, .Initializer);
+ try dg.renderValue(writer, Type.usize, slice.len, initializer_type);
try writer.writeByte('}');
},
.function => {
@@ -1024,7 +1071,7 @@ pub const DeclGen = struct {
.eu_payload_ptr,
.decl_ref_mut,
.decl_ref,
- => try dg.renderParentPtr(writer, val, ty),
+ => try dg.renderParentPtr(writer, val, ty, location),
else => unreachable,
},
.Array, .Vector => {
@@ -1040,7 +1087,7 @@ pub const DeclGen = struct {
try writer.writeByte('{');
const ai = ty.arrayInfo();
if (ai.sentinel) |s| {
- try dg.renderValue(writer, ai.elem_type, s, .Initializer);
+ try dg.renderValue(writer, ai.elem_type, s, initializer_type);
} else {
try writer.writeByte('0');
}
@@ -1060,31 +1107,51 @@ pub const DeclGen = struct {
defer arena.deinit();
const arena_allocator = arena.allocator();
+ // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
+ const max_string_initializer_len = 65535;
+
const ai = ty.arrayInfo();
if (ai.elem_type.eql(Type.u8, dg.module)) {
- try writer.writeByte('"');
- var index: usize = 0;
- while (index < ai.len) : (index += 1) {
- const elem_val = try val.elemValue(dg.module, arena_allocator, index);
- const elem_val_u8 = @intCast(u8, elem_val.toUnsignedInt(target));
- try writeStringLiteralChar(writer, elem_val_u8);
- }
- if (ai.sentinel) |s| {
- const s_u8 = @intCast(u8, s.toUnsignedInt(target));
- try writeStringLiteralChar(writer, s_u8);
+ if (ai.len <= max_string_initializer_len) {
+ var literal = stringLiteral(writer);
+ try literal.start();
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ const elem_val = try val.elemValue(dg.module, arena_allocator, index);
+ const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
+ try literal.writeChar(elem_val_u8);
+ }
+ if (ai.sentinel) |s| {
+ const s_u8 = @intCast(u8, s.toUnsignedInt(target));
+ try literal.writeChar(s_u8);
+ }
+ try literal.end();
+ } else {
+ try writer.writeByte('{');
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ if (index != 0) try writer.writeByte(',');
+ const elem_val = try val.elemValue(dg.module, arena_allocator, index);
+ const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
+ try writer.print("'\\x{x}'", .{elem_val_u8});
+ }
+ if (ai.sentinel) |s| {
+ if (index != 0) try writer.writeByte(',');
+ try dg.renderValue(writer, ai.elem_type, s, initializer_type);
+ }
+ try writer.writeByte('}');
}
- try writer.writeByte('"');
} else {
try writer.writeByte('{');
var index: usize = 0;
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeByte(',');
const elem_val = try val.elemValue(dg.module, arena_allocator, index);
- try dg.renderValue(writer, ai.elem_type, elem_val, .Initializer);
+ try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type);
}
if (ai.sentinel) |s| {
if (index != 0) try writer.writeByte(',');
- try dg.renderValue(writer, ai.elem_type, s, .Initializer);
+ try dg.renderValue(writer, ai.elem_type, s, initializer_type);
}
try writer.writeByte('}');
}
@@ -1111,7 +1178,7 @@ pub const DeclGen = struct {
return dg.renderValue(writer, payload_ty, payload_val, location);
}
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -1120,9 +1187,9 @@ pub const DeclGen = struct {
const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef;
try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, payload_val, .Initializer);
+ try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
try writer.writeAll(", .is_null = ");
- try dg.renderValue(writer, Type.bool, is_null_val, .Initializer);
+ try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
try writer.writeAll(" }");
},
.ErrorSet => {
@@ -1145,7 +1212,7 @@ pub const DeclGen = struct {
return dg.renderValue(writer, error_ty, val, location);
}
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -1155,9 +1222,9 @@ pub const DeclGen = struct {
const error_val = if (val.errorUnionIsPayload()) Value.zero else val;
try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, payload_val, .Initializer);
+ try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
try writer.writeAll(", .error = ");
- try dg.renderValue(writer, error_ty, error_val, .Initializer);
+ try dg.renderValue(writer, error_ty, error_val, initializer_type);
try writer.writeAll(" }");
},
.Enum => {
@@ -1197,11 +1264,11 @@ pub const DeclGen = struct {
.Fn => switch (val.tag()) {
.function => {
const decl = val.castTag(.function).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl);
+ return dg.renderDeclValue(writer, ty, val, decl, location);
},
.extern_fn => {
const decl = val.castTag(.extern_fn).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl);
+ return dg.renderDeclValue(writer, ty, val, decl, location);
},
else => unreachable,
},
@@ -1209,7 +1276,7 @@ pub const DeclGen = struct {
.Auto, .Extern => {
const field_vals = val.castTag(.aggregate).?.data;
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -1222,7 +1289,7 @@ pub const DeclGen = struct {
if (!field_ty.hasRuntimeBits()) continue;
if (!empty) try writer.writeByte(',');
- try dg.renderValue(writer, field_ty, field_val, .Initializer);
+ try dg.renderValue(writer, field_ty, field_val, initializer_type);
empty = false;
}
@@ -1242,31 +1309,85 @@ pub const DeclGen = struct {
var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 };
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
- try writer.writeByte('(');
- var empty = true;
- for (field_vals) |field_val, index| {
+ var eff_num_fields: usize = 0;
+ for (field_vals) |_, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
- if (!empty) try writer.writeAll(" | ");
+ eff_num_fields += 1;
+ }
+
+ if (eff_num_fields == 0) {
try writer.writeByte('(');
- try dg.renderTypecast(writer, ty);
+ try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
- try dg.renderValue(writer, field_ty, field_val, .Other);
- try writer.writeAll(" << ");
- try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ } else if (ty.bitSize(target) > 64) {
+ // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
+ var num_or = eff_num_fields - 1;
+ while (num_or > 0) : (num_or -= 1) {
+ try writer.writeAll("zig_or_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ }
- bit_offset_val_pl.data += field_ty.bitSize(target);
- empty = false;
+ var eff_index: usize = 0;
+ var needs_closing_paren = false;
+ for (field_vals) |field_val, index| {
+ const field_ty = ty.structFieldType(index);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+
+ const cast_context = IntCastContext{ .value = .{ .value = field_val } };
+ if (bit_offset_val_pl.data != 0) {
+ try writer.writeAll("zig_shl_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
+ try writer.writeAll(", ");
+ try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ try writer.writeByte(')');
+ } else {
+ try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
+ }
+
+ if (needs_closing_paren) try writer.writeByte(')');
+ if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
+
+ bit_offset_val_pl.data += field_ty.bitSize(target);
+ needs_closing_paren = true;
+ eff_index += 1;
+ }
+ } else {
+ try writer.writeByte('(');
+ // a << a_off | b << b_off | c << c_off
+ var empty = true;
+ for (field_vals) |field_val, index| {
+ const field_ty = ty.structFieldType(index);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+
+ if (!empty) try writer.writeAll(" | ");
+ try writer.writeByte('(');
+ try dg.renderTypecast(writer, ty);
+ try writer.writeByte(')');
+
+ if (bit_offset_val_pl.data != 0) {
+ try dg.renderValue(writer, field_ty, field_val, .Other);
+ try writer.writeAll(" << ");
+ try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ } else {
+ try dg.renderValue(writer, field_ty, field_val, .Other);
+ }
+
+ bit_offset_val_pl.data += field_ty.bitSize(target);
+ empty = false;
+ }
+ try writer.writeByte(')');
}
- if (empty) try dg.renderValue(writer, ty, Value.undef, .Initializer);
- try writer.writeByte(')');
},
},
.Union => {
const union_obj = val.castTag(.@"union").?.data;
- if (location != .Initializer) {
+ if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
@@ -1286,7 +1407,7 @@ pub const DeclGen = struct {
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
}
- try dg.renderValue(writer, field_ty, union_obj.val, .Initializer);
+ try dg.renderValue(writer, field_ty, union_obj.val, initializer_type);
} else {
try writer.writeAll("0");
}
@@ -1298,7 +1419,7 @@ pub const DeclGen = struct {
const layout = ty.unionGetLayout(target);
if (layout.tag_size != 0) {
try writer.writeAll(".tag = ");
- try dg.renderValue(writer, tag_ty, union_obj.tag, .Initializer);
+ try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type);
try writer.writeAll(", ");
}
try writer.writeAll(".payload = {");
@@ -1307,11 +1428,11 @@ pub const DeclGen = struct {
var it = ty.unionFields().iterator();
if (field_ty.hasRuntimeBits()) {
try writer.print(".{ } = ", .{fmtIdent(field_name)});
- try dg.renderValue(writer, field_ty, union_obj.val, .Initializer);
+ try dg.renderValue(writer, field_ty, union_obj.val, initializer_type);
} else while (it.next()) |field| {
if (!field.value_ptr.ty.hasRuntimeBits()) continue;
try writer.print(".{ } = ", .{fmtIdent(field.key_ptr.*)});
- try dg.renderValue(writer, field.value_ptr.ty, Value.undef, .Initializer);
+ try dg.renderValue(writer, field.value_ptr.ty, Value.undef, initializer_type);
break;
} else try writer.writeAll(".empty_union = 0");
if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
@@ -2082,6 +2203,103 @@ pub const DeclGen = struct {
});
}
+ const IntCastContext = union(enum) {
+ c_value: struct {
+ f: *Function,
+ value: CValue,
+ },
+ value: struct {
+ value: Value,
+ },
+
+ pub fn writeValue(self: *const IntCastContext, dg: *DeclGen, w: anytype, value_ty: Type, location: ValueRenderLocation) !void {
+ switch (self.*) {
+ .c_value => |v| {
+ try v.f.writeCValue(w, v.value, location);
+ },
+ .value => |v| {
+ try dg.renderValue(w, value_ty, v.value, location);
+ },
+ }
+ }
+ };
+
+ /// Renders a cast to an int type, from either an int or a pointer.
+ ///
+ /// Some platforms don't have 128 bit integers, so we need to use
+ /// the zig_as_ and zig_lo_ macros in those cases.
+ ///
+ /// | Dest type bits | Src type | Result
+ /// |------------------|------------------|---------------------------|
+ /// | < 64 bit integer | pointer | (zig_<dest_ty>)(zig_<u|i>size)src
+ /// | < 64 bit integer | < 64 bit integer | (zig_<dest_ty>)src
+ /// | < 64 bit integer | > 64 bit integer | zig_lo(src)
+ /// | > 64 bit integer | pointer | zig_as_<dest_ty>(0, (zig_<u|i>size)src)
+ /// | > 64 bit integer | < 64 bit integer | zig_as_<dest_ty>(0, src)
+ /// | > 64 bit integer | > 64 bit integer | zig_as_<dest_ty>(zig_hi_<src_ty>(src), zig_lo_<src_ty>(src))
+ fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void {
+ const target = dg.module.getTarget();
+ const dest_bits = dest_ty.bitSize(target);
+ const dest_int_info = dest_ty.intInfo(target);
+
+ const src_is_ptr = src_ty.isPtrAtRuntime();
+ const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) {
+ .unsigned => Type.usize,
+ .signed => Type.isize,
+ } else src_ty;
+
+ const src_bits = src_eff_ty.bitSize(target);
+ const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null;
+ if (dest_bits <= 64 and src_bits <= 64) {
+ const needs_cast = src_int_info == null or
+ (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or
+ dest_int_info.signedness != src_int_info.?.signedness);
+
+ if (needs_cast) {
+ try w.writeByte('(');
+ try dg.renderTypecast(w, dest_ty);
+ try w.writeByte(')');
+ }
+ if (src_is_ptr) {
+ try w.writeByte('(');
+ try dg.renderTypecast(w, src_eff_ty);
+ try w.writeByte(')');
+ }
+ try context.writeValue(dg, w, src_ty, location);
+ } else if (dest_bits <= 64 and src_bits > 64) {
+ assert(!src_is_ptr);
+ try w.writeAll("zig_lo_");
+ try dg.renderTypeForBuiltinFnName(w, src_eff_ty);
+ try w.writeByte('(');
+ try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try w.writeByte(')');
+ } else if (dest_bits > 64 and src_bits <= 64) {
+ try w.writeAll("zig_as_");
+ try dg.renderTypeForBuiltinFnName(w, dest_ty);
+ try w.writeAll("(0, "); // TODO: Should the 0 go through fmtIntLiteral?
+ if (src_is_ptr) {
+ try w.writeByte('(');
+ try dg.renderTypecast(w, src_eff_ty);
+ try w.writeByte(')');
+ }
+ try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try w.writeByte(')');
+ } else {
+ assert(!src_is_ptr);
+ try w.writeAll("zig_as_");
+ try dg.renderTypeForBuiltinFnName(w, dest_ty);
+ try w.writeAll("(zig_hi_");
+ try dg.renderTypeForBuiltinFnName(w, src_eff_ty);
+ try w.writeByte('(');
+ try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try w.writeAll("), zig_lo_");
+ try dg.renderTypeForBuiltinFnName(w, src_eff_ty);
+ try w.writeByte('(');
+ try context.writeValue(dg, w, src_ty, .FunctionArgument);
+ try w.writeAll("))");
+ }
+ }
+
/// Renders a type in C typecast format.
///
/// This is guaranteed to be valid in a typecast expression, but not
@@ -2131,7 +2349,7 @@ pub const DeclGen = struct {
const c_len_val = Value.initPayload(&c_len_pl.base);
try suffix_writer.writeByte('[');
- if (mutability == .ConstArgument and depth == 0) try suffix_writer.writeAll("static const ");
+ if (mutability == .ConstArgument and depth == 0) try suffix_writer.writeAll("zig_const_arr ");
try suffix.writer().print("{}]", .{try dg.fmtIntLiteral(Type.usize, c_len_val)});
render_ty = array_info.elem_type;
depth += 1;
@@ -2303,6 +2521,9 @@ pub const DeclGen = struct {
try dg.writeCValue(writer, member);
}
+ const IdentHasher = std.crypto.auth.siphash.SipHash128(1, 3);
+ const ident_hasher_init: IdentHasher = IdentHasher.init(&[_]u8{0} ** IdentHasher.key_length);
+
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void {
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
@@ -2320,7 +2541,18 @@ pub const DeclGen = struct {
const gpa = dg.gpa;
const name = try decl.getFullyQualifiedName(dg.module);
defer gpa.free(name);
- return writer.print("{}", .{fmtIdent(name)});
+
+ // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), expand
+ // to 3x the length of its input
+ if (name.len > 1365) {
+ var hash = ident_hasher_init;
+ hash.update(name);
+ const ident_hash = hash.finalInt();
+ try writer.writeAll("zig_D_");
+ return std.fmt.formatIntValue(ident_hash, "x", .{}, writer);
+ } else {
+ return writer.print("{}", .{fmtIdent(name)});
+ }
}
}
@@ -2333,6 +2565,10 @@ pub const DeclGen = struct {
try writer.print("{c}{d}", .{ signAbbrev(int_info.signedness), c_bits });
} else if (ty.isRuntimeFloat()) {
try ty.print(writer, dg.module);
+ } else if (ty.isPtrAtRuntime()) {
+ try writer.print("p{d}", .{ty.bitSize(target)});
+ } else if (ty.zigTypeTag() == .Bool) {
+ try writer.print("u8", .{});
} else return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
ty.fmt(dg.module),
});
@@ -2385,6 +2621,19 @@ pub const DeclGen = struct {
.mod = dg.module,
} };
}
+
+ fn fmtIntLiteralLoc(
+ dg: *DeclGen,
+ ty: Type,
+ val: Value,
+ location: ValueRenderLocation, // TODO: Instead add this as optional arg to fmtIntLiteral
+ ) !std.fmt.Formatter(formatIntLiteral) {
+ const int_info = ty.intInfo(dg.module.getTarget());
+ const c_bits = toCIntBits(int_info.bits);
+ if (c_bits == null or c_bits.? > 128)
+ return dg.fail("TODO implement integer constants larger than 128 bits", .{});
+ return std.fmt.Formatter(formatIntLiteral){ .data = .{ .ty = ty, .val = val, .mod = dg.module, .location = location } };
+ }
};
pub fn genGlobalAsm(mod: *Module, code: *std.ArrayList(u8)) !void {
@@ -2430,7 +2679,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .Const, 0, .Complete);
try writer.writeAll(" = ");
- try o.dg.renderValue(writer, name_ty, name_val, .Initializer);
+ try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer);
try writer.writeAll(";\n");
}
@@ -2601,7 +2850,7 @@ pub fn genDecl(o: *Object) !void {
if (variable.is_threadlocal) try w.writeAll("zig_threadlocal ");
try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
try w.writeAll(" = ");
- try o.dg.renderValue(w, tv.ty, variable.init, .Initializer);
+ try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer);
try w.writeByte(';');
try o.indent_writer.insertNewline();
} else {
@@ -2618,7 +2867,7 @@ pub fn genDecl(o: *Object) !void {
// https://github.com/ziglang/zig/issues/7582
try o.dg.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
try writer.writeAll(" = ");
- try o.dg.renderValue(writer, tv.ty, tv.val, .Initializer);
+ try o.dg.renderValue(writer, tv.ty, tv.val, .StaticInitializer);
try writer.writeAll(";\n");
}
}
@@ -2908,6 +3157,12 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
.error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
+ .vector_store_elem => return f.fail("TODO: C backend: implement vector_store_elem", .{}),
+
+ .c_va_arg => return f.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return f.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return f.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return f.fail("TODO implement c_va_start", .{}),
// zig fmt: on
};
if (result_value == .local) {
@@ -3235,11 +3490,20 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, field_ty);
try writer.writeAll("((");
try f.renderTypecast(writer, field_ty);
- try writer.writeAll(")zig_shr_");
+ try writer.writeByte(')');
+ const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64;
+ if (cant_cast) {
+ if (field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ try writer.writeAll("zig_lo_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
+ try writer.writeByte('(');
+ }
+ try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
try f.writeCValueDeref(writer, operand);
try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
+ if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_ty, .Bits);
try writer.writeByte(')');
} else {
@@ -3313,11 +3577,11 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
const local = try f.allocLocal(inst, inst_ty);
+ const operand_ty = f.air.typeOf(ty_op.operand);
+
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = (");
- try f.renderTypecast(writer, inst_ty);
- try writer.writeByte(')');
- try f.writeCValue(writer, operand, .Other);
+ try writer.writeAll(" = ");
+ try f.renderIntCast(writer, inst_ty, operand, operand_ty, .Other);
try writer.writeAll(";\n");
return local;
}
@@ -3337,15 +3601,27 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const target = f.object.dg.module.getTarget();
const dest_int_info = inst_ty.intInfo(target);
const dest_bits = dest_int_info.bits;
+ const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
+ return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
+ const operand_ty = f.air.typeOf(ty_op.operand);
+ const operand_int_info = operand_ty.intInfo(target);
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = (");
- try f.renderTypecast(writer, inst_ty);
- try writer.writeByte(')');
+ try writer.writeAll(" = ");
+
+ const needs_lo = operand_int_info.bits > 64 and dest_bits <= 64;
+ if (needs_lo) {
+ try writer.writeAll("zig_lo_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try writer.writeByte('(');
+ } else if (dest_c_bits <= 64) {
+ try writer.writeByte('(');
+ try f.renderTypecast(writer, inst_ty);
+ try writer.writeByte(')');
+ }
if (dest_bits >= 8 and std.math.isPowerOfTwo(dest_bits)) {
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll(";\n");
} else switch (dest_int_info.signedness) {
.unsigned => {
var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
@@ -3356,14 +3632,14 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
const mask_val = try inst_ty.maxInt(stack.get(), target);
-
+ try writer.writeAll("zig_and_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
try writer.writeByte('(');
- try f.writeCValue(writer, operand, .Other);
- try writer.print(" & {x});\n", .{try f.fmtIntLiteral(inst_ty, mask_val)});
+ try f.writeCValue(writer, operand, .FunctionArgument);
+ try writer.print(", {x})", .{try f.fmtIntLiteral(operand_ty, mask_val)});
},
.signed => {
- const operand_ty = f.air.typeOf(ty_op.operand);
- const c_bits = toCIntBits(operand_ty.intInfo(target).bits) orelse
+ const c_bits = toCIntBits(operand_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
var shift_pl = Value.Payload.U64{
.base = .{ .tag = .int_u64 },
@@ -3371,11 +3647,29 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
};
const shift_val = Value.initPayload(&shift_pl.base);
- try writer.print("((int{d}_t)((uint{0d}_t)", .{c_bits});
- try f.writeCValue(writer, operand, .Other);
- try writer.print(" << {}) >> {0});\n", .{try f.fmtIntLiteral(Type.u8, shift_val)});
+ try writer.writeAll("zig_shr_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ if (c_bits == 128) {
+ try writer.print("(zig_bitcast_i{d}(", .{c_bits});
+ } else {
+ try writer.print("((int{d}_t)", .{c_bits});
+ }
+ try writer.print("zig_shl_u{d}(", .{c_bits});
+ if (c_bits == 128) {
+ try writer.print("zig_bitcast_u{d}(", .{c_bits});
+ } else {
+ try writer.print("(uint{d}_t)", .{c_bits});
+ }
+ try f.writeCValue(writer, operand, .FunctionArgument);
+ if (c_bits == 128) try writer.writeByte(')');
+ try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)});
+ if (c_bits == 128) try writer.writeByte(')');
+ try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)});
},
}
+
+ if (needs_lo) try writer.writeByte(')');
+ try writer.writeAll(";\n");
return local;
}
@@ -3512,15 +3806,26 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValueDeref(writer, ptr_val);
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
- try writer.writeAll("((");
- try f.renderTypecast(writer, host_ty);
- try writer.writeByte(')');
+ try writer.writeByte('(');
+ const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64;
+ if (cant_cast) {
+ if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ try writer.writeAll("zig_as_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
+ try writer.writeAll("(0, ");
+ } else {
+ try writer.writeByte('(');
+ try f.renderTypecast(writer, host_ty);
+ try writer.writeByte(')');
+ }
+
if (src_ty.isPtrAtRuntime()) {
try writer.writeByte('(');
try f.renderTypecast(writer, Type.usize);
try writer.writeByte(')');
}
try f.writeCValue(writer, src_val, .Other);
+ if (cant_cast) try writer.writeByte(')');
try writer.print(", {}))", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
} else {
try f.writeCValueDeref(writer, ptr_val);
@@ -3585,6 +3890,10 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
+ const inst_ty = f.air.typeOfIndex(inst);
+ if (inst_ty.tag() != .bool)
+ return try airUnBuiltinCall(f, inst, "not", .Bits);
+
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
if (f.liveness.isUnused(inst)) {
@@ -3596,14 +3905,13 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
const local = try f.allocLocal(inst, inst_ty);
+
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- try writer.writeByte(if (inst_ty.tag() == .bool) '!' else '~');
+ try writer.writeByte('!');
try f.writeCValue(writer, op, .Other);
try writer.writeAll(";\n");
-
return local;
}
@@ -4370,7 +4678,15 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, cond, .Other);
try writer.writeAll(") ");
try genBody(f, then_body);
- try writer.writeAll(" else ");
+
+ // TODO: If body ends in goto, elide the else block?
+ const needs_else = then_body.len <= 0 or f.air.instructions.items(.tag)[then_body[then_body.len - 1]] != .br;
+ if (needs_else) {
+ try writer.writeAll(" else ");
+ } else {
+ try writer.writeByte('\n');
+ }
+
f.value_map.deinit();
f.value_map = cloned_map.move();
const free_locals = f.getFreeLocals();
@@ -4383,7 +4699,12 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
try noticeBranchFrees(f, pre_locals_len, inst);
- try genBody(f, else_body);
+ if (needs_else) {
+ try genBody(f, else_body);
+ } else {
+ try genBodyInner(f, else_body);
+ }
+
try f.object.indent_writer.insertNewline();
return CValue.none;
@@ -5125,7 +5446,9 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
.begin, .end => {
try writer.writeByte('(');
try f.writeCValue(writer, struct_ptr, .Other);
- try writer.print(")[{}]", .{@boolToInt(field_loc == .end)});
+ try writer.print(")[{}]", .{
+ @boolToInt(field_loc == .end and struct_ty.hasRuntimeBitsIgnoreComptime()),
+ });
},
.field => |field| if (extra_name != .none) {
try f.writeCValueDerefMember(writer, struct_ptr, extra_name);
@@ -5204,13 +5527,22 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
try writer.writeAll("((");
try f.renderTypecast(writer, field_int_ty);
- try writer.writeAll(")zig_shr_");
+ try writer.writeByte(')');
+ const cant_cast = int_info.bits > 64;
+ if (cant_cast) {
+ if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ try writer.writeAll("zig_lo_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
+ try writer.writeByte('(');
+ }
+ try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
try f.writeCValue(writer, struct_byval, .Other);
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
+ if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .Bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
@@ -5798,7 +6130,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.writeAll(";\n");
try writer.writeAll("if (");
try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor});
- try f.renderTypecast(writer, ptr_ty.elemType());
+ try f.renderTypecast(writer, ptr_ty.childType());
try writer.writeByte(')');
if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
@@ -5811,6 +6143,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writeMemoryOrder(writer, extra.successOrder());
try writer.writeAll(", ");
try writeMemoryOrder(writer, extra.failureOrder());
+ try writer.writeAll(", ");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType());
try writer.writeByte(')');
try writer.writeAll(") {\n");
f.object.indent_writer.pushIndent();
@@ -5825,7 +6159,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.writeAll(";\n");
try f.writeCValue(writer, local, .Other);
try writer.print(".is_null = zig_cmpxchg_{s}((zig_atomic(", .{flavor});
- try f.renderTypecast(writer, ptr_ty.elemType());
+ try f.renderTypecast(writer, ptr_ty.childType());
try writer.writeByte(')');
if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
@@ -5838,6 +6172,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writeMemoryOrder(writer, extra.successOrder());
try writer.writeAll(", ");
try writeMemoryOrder(writer, extra.failureOrder());
+ try writer.writeAll(", ");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType());
try writer.writeByte(')');
try writer.writeAll(";\n");
}
@@ -5860,8 +6196,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ pl_op.operand, extra.operand });
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
+ try f.writeCValue(writer, local, .Other);
try writer.print(" = zig_atomicrmw_{s}((", .{toAtomicRmwSuffix(extra.op())});
switch (extra.op()) {
else => {
@@ -5881,6 +6217,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, operand, .FunctionArgument);
try writer.writeAll(", ");
try writeMemoryOrder(writer, extra.ordering());
+ try writer.writeAll(", ");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType());
try writer.writeAll(");\n");
if (f.liveness.isUnused(inst)) {
@@ -5913,6 +6251,8 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
try writeMemoryOrder(writer, atomic_load.order);
+ try writer.writeAll(", ");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType());
try writer.writeAll(");\n");
return local;
@@ -5934,7 +6274,9 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
try f.writeCValue(writer, element, .FunctionArgument);
- try writer.print(", {s});\n", .{order});
+ try writer.print(", {s}, ", .{order});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType());
+ try writer.writeAll(");\n");
return CValue.none;
}
@@ -6391,9 +6733,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
},
.Packed => {
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = (");
- try f.renderTypecast(writer, inst_ty);
- try writer.writeAll(")");
+ try writer.writeAll(" = ");
const int_info = inst_ty.intInfo(target);
var bit_offset_ty_pl = Type.Payload.Bits{
@@ -6423,20 +6763,28 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
if (!empty) try writer.writeAll(", ");
+ // TODO: Skip this entire shift if val is 0?
try writer.writeAll("zig_shlw_");
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
- try writer.writeAll("((");
- try f.renderTypecast(writer, inst_ty);
- try writer.writeByte(')');
- if (field_ty.isPtrAtRuntime()) {
+ try writer.writeByte('(');
+
+ if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
+ try f.renderIntCast(writer, inst_ty, element, field_ty, .FunctionArgument);
+ } else {
try writer.writeByte('(');
- try f.renderTypecast(writer, switch (int_info.signedness) {
- .unsigned => Type.usize,
- .signed => Type.isize,
- });
+ try f.renderTypecast(writer, inst_ty);
try writer.writeByte(')');
+ if (field_ty.isPtrAtRuntime()) {
+ try writer.writeByte('(');
+ try f.renderTypecast(writer, switch (int_info.signedness) {
+ .unsigned => Type.usize,
+ .signed => Type.isize,
+ });
+ try writer.writeByte(')');
+ }
+ try f.writeCValue(writer, element, .Other);
}
- try f.writeCValue(writer, element, .Other);
+
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits);
@@ -6446,7 +6794,14 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
bit_offset_val_pl.data += field_ty.bitSize(target);
empty = false;
}
- if (empty) try f.writeCValue(writer, .{ .undef = inst_ty }, .Initializer);
+
+ if (empty) {
+ try writer.writeByte('(');
+ try f.renderTypecast(writer, inst_ty);
+ try writer.writeByte(')');
+ try f.writeCValue(writer, .{ .undef = inst_ty }, .Initializer);
+ }
+
try writer.writeAll(";\n");
},
},
@@ -6779,6 +7134,68 @@ fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
} else unreachable;
}
+fn StringLiteral(comptime WriterType: type) type {
+ // MSVC has a length limit of 16380 per string literal (before concatenation)
+ const max_char_len = 4;
+ const max_len = 16380 - max_char_len;
+
+ return struct {
+ cur_len: u64 = 0,
+ counting_writer: std.io.CountingWriter(WriterType),
+
+ pub const Error = WriterType.Error;
+
+ const Self = @This();
+
+ pub fn start(self: *Self) Error!void {
+ const writer = self.counting_writer.writer();
+ try writer.writeByte('\"');
+ }
+
+ pub fn end(self: *Self) Error!void {
+ const writer = self.counting_writer.writer();
+ try writer.writeByte('\"');
+ }
+
+ fn writeStringLiteralChar(writer: anytype, c: u8) !void {
+ switch (c) {
+ 7 => try writer.writeAll("\\a"),
+ 8 => try writer.writeAll("\\b"),
+ '\t' => try writer.writeAll("\\t"),
+ '\n' => try writer.writeAll("\\n"),
+ 11 => try writer.writeAll("\\v"),
+ 12 => try writer.writeAll("\\f"),
+ '\r' => try writer.writeAll("\\r"),
+ '"', '\'', '?', '\\' => try writer.print("\\{c}", .{c}),
+ else => switch (c) {
+ ' '...'~' => try writer.writeByte(c),
+ else => try writer.print("\\{o:0>3}", .{c}),
+ },
+ }
+ }
+
+ pub fn writeChar(self: *Self, c: u8) Error!void {
+ const writer = self.counting_writer.writer();
+
+ if (self.cur_len == 0 and self.counting_writer.bytes_written > 1)
+ try writer.writeAll("\"\"");
+
+ const len = self.counting_writer.bytes_written;
+ try writeStringLiteralChar(writer, c);
+
+ const char_length = self.counting_writer.bytes_written - len;
+ assert(char_length <= max_char_len);
+ self.cur_len += char_length;
+
+ if (self.cur_len >= max_len) self.cur_len = 0;
+ }
+ };
+}
+
+fn stringLiteral(child_stream: anytype) StringLiteral(@TypeOf(child_stream)) {
+ return .{ .counting_writer = std.io.countingWriter(child_stream) };
+}
+
fn formatStringLiteral(
str: []const u8,
comptime fmt: []const u8,
@@ -6786,44 +7203,25 @@ fn formatStringLiteral(
writer: anytype,
) @TypeOf(writer).Error!void {
if (fmt.len != 1 or fmt[0] != 's') @compileError("Invalid fmt: " ++ fmt);
- try writer.writeByte('\"');
+
+ var literal = stringLiteral(writer);
+ try literal.start();
for (str) |c|
- try writeStringLiteralChar(writer, c);
- try writer.writeByte('\"');
+ try literal.writeChar(c);
+ try literal.end();
}
fn fmtStringLiteral(str: []const u8) std.fmt.Formatter(formatStringLiteral) {
return .{ .data = str };
}
-fn writeStringLiteralChar(writer: anytype, c: u8) !void {
- switch (c) {
- 7 => try writer.writeAll("\\a"),
- 8 => try writer.writeAll("\\b"),
- '\t' => try writer.writeAll("\\t"),
- '\n' => try writer.writeAll("\\n"),
- 11 => try writer.writeAll("\\v"),
- 12 => try writer.writeAll("\\f"),
- '\r' => try writer.writeAll("\\r"),
- '"', '\'', '?', '\\' => try writer.print("\\{c}", .{c}),
- else => switch (c) {
- ' '...'~' => try writer.writeByte(c),
- else => try writer.print("\\{o:0>3}", .{c}),
- },
- }
-}
-
fn undefPattern(comptime IntType: type) IntType {
const int_info = @typeInfo(IntType).Int;
const UnsignedType = std.meta.Int(.unsigned, int_info.bits);
return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
}
-const FormatIntLiteralContext = struct {
- ty: Type,
- val: Value,
- mod: *Module,
-};
+const FormatIntLiteralContext = struct { ty: Type, val: Value, mod: *Module, location: ?ValueRenderLocation = null };
fn formatIntLiteral(
data: FormatIntLiteralContext,
comptime fmt: []const u8,
@@ -6891,10 +7289,31 @@ fn formatIntLiteral(
return writer.print("{s}_{s}", .{ abbrev, if (int.positive) "MAX" else "MIN" });
}
- if (!int.positive) try writer.writeByte('-');
+ var use_twos_comp = false;
+ if (!int.positive) {
+ if (c_bits > 64) {
+ // TODO: Can this be done for decimal literals as well?
+ if (fmt.len == 1 and fmt[0] != 'd') {
+ use_twos_comp = true;
+ } else {
+ // TODO: Use fmtIntLiteral for 0?
+ try writer.print("zig_sub_{c}{d}(zig_as_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits });
+ }
+ } else {
+ try writer.writeByte('-');
+ }
+ }
+
switch (data.ty.tag()) {
.c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {},
- else => try writer.print("zig_as_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }),
+ else => {
+ if (int_info.bits > 64 and data.location != null and data.location.? == .StaticInitializer) {
+ // MSVC treats casting the struct initializer as not constant (C2099), so an alternate form is used in global initializers
+ try writer.print("zig_as_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
+ } else {
+ try writer.print("zig_as_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
+ }
+ },
}
const limbs_count_64 = @divExact(64, @bitSizeOf(BigIntLimb));
@@ -6934,16 +7353,34 @@ fn formatIntLiteral(
} else {
assert(c_bits == 128);
const split = std.math.min(int.limbs.len, limbs_count_64);
+ var twos_comp_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
+
+ // Adding a negation in the C code before the literal doesn't work in all cases:
+ // - struct versions would require an extra zig_sub_ call to negate, which wouldn't work in constant expressions
+ // - negating the f80 int representation (i128) doesn't make sense
+ // Instead we write out the literal as a negative number in twos complement
+ var limbs = int.limbs;
+
+ if (use_twos_comp) {
+ var twos_comp = BigInt.Mutable{
+ .limbs = &twos_comp_limbs,
+ .positive = undefined,
+ .len = undefined,
+ };
+
+ twos_comp.convertToTwosComplement(int, .signed, int_info.bits);
+ limbs = twos_comp.limbs;
+ }
var upper_pl = Value.Payload.BigInt{
.base = .{ .tag = .int_big_positive },
- .data = int.limbs[split..],
+ .data = limbs[split..],
};
const upper_val = Value.initPayload(&upper_pl.base);
try formatIntLiteral(.{
.ty = switch (int_info.signedness) {
.unsigned => Type.u64,
- .signed => Type.i64,
+ .signed => if (use_twos_comp) Type.u64 else Type.i64,
},
.val = upper_val,
.mod = data.mod,
@@ -6953,7 +7390,7 @@ fn formatIntLiteral(
var lower_pl = Value.Payload.BigInt{
.base = .{ .tag = .int_big_positive },
- .data = int.limbs[0..split],
+ .data = limbs[0..split],
};
const lower_val = Value.initPayload(&lower_pl.base);
try formatIntLiteral(.{
@@ -6962,6 +7399,7 @@ fn formatIntLiteral(
.mod = data.mod,
}, fmt, options, writer);
+ if (!int.positive and c_bits > 64 and !use_twos_comp) try writer.writeByte(')');
return writer.writeByte(')');
}
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 7d70a51666..235949d117 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -520,6 +520,10 @@ pub const Object = struct {
if (options.pie) llvm_module.setModulePIELevel();
if (code_model != .Default) llvm_module.setModuleCodeModel(code_model);
+ if (options.opt_bisect_limit >= 0) {
+ context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit));
+ }
+
return Object{
.gpa = gpa,
.module = options.module.?,
@@ -582,7 +586,7 @@ pub const Object = struct {
const mod = self.module;
const target = mod.getTarget();
- const llvm_ptr_ty = self.context.intType(8).pointerType(0); // TODO: Address space
+ const llvm_ptr_ty = self.context.pointerType(0); // TODO: Address space
const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
const type_fields = [_]*llvm.Type{
llvm_ptr_ty,
@@ -608,7 +612,7 @@ pub const Object = struct {
str_global.setAlignment(1);
const slice_fields = [_]*llvm.Value{
- str_global.constBitCast(llvm_ptr_ty),
+ str_global,
llvm_usize_ty.constInt(name.len, .False),
};
llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len);
@@ -623,7 +627,7 @@ pub const Object = struct {
error_name_table_global.setUnnamedAddr(.True);
error_name_table_global.setAlignment(slice_alignment); // TODO: Dont hardcode
- const error_name_table_ptr = error_name_table_global.constBitCast(llvm_slice_ty.pointerType(0)); // TODO: Address space
+ const error_name_table_ptr = error_name_table_global;
error_name_table_ptr_global.setInitializer(error_name_table_ptr);
}
@@ -681,10 +685,9 @@ pub const Object = struct {
const other_global = object.getLlvmGlobal(decl.name) orelse continue;
if (other_global == llvm_global) continue;
- const new_global_ptr = other_global.constBitCast(llvm_global.typeOf());
- llvm_global.replaceAllUsesWith(new_global_ptr);
+ llvm_global.replaceAllUsesWith(other_global);
deleteLlvmGlobal(llvm_global);
- entry.value_ptr.* = new_global_ptr;
+ entry.value_ptr.* = other_global;
}
object.extern_collisions.clearRetainingCapacity();
@@ -703,11 +706,7 @@ pub const Object = struct {
const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue;
if (other_global == llvm_global) continue;
- // replaceAllUsesWith requires the type to be unchanged. So we bitcast
- // the new global to the old type and use that as the thing to replace
- // old uses.
- const new_global_ptr = llvm_global.constBitCast(other_global.typeOf());
- other_global.replaceAllUsesWith(new_global_ptr);
+ other_global.replaceAllUsesWith(llvm_global);
llvm_global.takeName(other_global);
deleteLlvmGlobal(other_global);
// Problem: now we need to replace in the decl_map that
@@ -962,7 +961,7 @@ pub const Object = struct {
if (isByRef(param_ty)) {
const alignment = param_ty.abiAlignment(target);
const param_llvm_ty = param.typeOf();
- const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target);
+ const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
const store_inst = builder.buildStore(param, arg_ptr);
store_inst.setAlignment(alignment);
args.appendAssumeCapacity(arg_ptr);
@@ -1020,14 +1019,12 @@ pub const Object = struct {
const param_llvm_ty = try dg.lowerType(param_ty);
const abi_size = @intCast(c_uint, param_ty.abiSize(target));
const int_llvm_ty = dg.context.intType(abi_size * 8);
- const int_ptr_llvm_ty = int_llvm_ty.pointerType(0);
const alignment = @max(
param_ty.abiAlignment(target),
dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
- const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target);
- const casted_ptr = builder.buildBitCast(arg_ptr, int_ptr_llvm_ty, "");
- const store_inst = builder.buildStore(param, casted_ptr);
+ const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
+ const store_inst = builder.buildStore(param, arg_ptr);
store_inst.setAlignment(alignment);
try args.ensureUnusedCapacity(1);
@@ -1078,14 +1075,13 @@ pub const Object = struct {
const param_ty = fn_info.param_types[it.zig_index - 1];
const param_llvm_ty = try dg.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(target);
- const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target);
+ const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
- const casted_ptr = builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), "");
for (field_types) |_, field_i_usize| {
const field_i = @intCast(c_uint, field_i_usize);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
- const field_ptr = builder.buildStructGEP(llvm_ty, casted_ptr, field_i, "");
+ const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, "");
const store_inst = builder.buildStore(param, field_ptr);
store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
}
@@ -1113,9 +1109,8 @@ pub const Object = struct {
llvm_arg_i += 1;
const alignment = param_ty.abiAlignment(target);
- const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target);
- const casted_ptr = builder.buildBitCast(arg_ptr, param.typeOf().pointerType(0), "");
- _ = builder.buildStore(param, casted_ptr);
+ const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
+ _ = builder.buildStore(param, arg_ptr);
if (isByRef(param_ty)) {
try args.append(arg_ptr);
@@ -1132,9 +1127,8 @@ pub const Object = struct {
llvm_arg_i += 1;
const alignment = param_ty.abiAlignment(target);
- const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target);
- const casted_ptr = builder.buildBitCast(arg_ptr, param.typeOf().pointerType(0), "");
- _ = builder.buildStore(param, casted_ptr);
+ const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
+ _ = builder.buildStore(param, arg_ptr);
if (isByRef(param_ty)) {
try args.append(arg_ptr);
@@ -1469,7 +1463,8 @@ pub const Object = struct {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
};
- const di_type = dib.createBasicType(name, info.bits, dwarf_encoding);
+ const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+ const di_type = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
},
@@ -1560,7 +1555,8 @@ pub const Object = struct {
return di_type;
},
.Bool => {
- const di_type = dib.createBasicType("bool", 1, DW.ATE.boolean);
+ const di_bits = 8; // lldb cannot handle non-byte sized types
+ const di_type = dib.createBasicType("bool", di_bits, DW.ATE.boolean);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
},
@@ -1572,6 +1568,7 @@ pub const Object = struct {
ptr_info.@"addrspace" != .generic or
ptr_info.bit_offset != 0 or
ptr_info.host_size != 0 or
+ ptr_info.vector_index != .none or
ptr_info.@"allowzero" or
!ptr_info.mutable or
ptr_info.@"volatile" or
@@ -1732,10 +1729,31 @@ pub const Object = struct {
return array_di_ty;
},
.Vector => {
+ const elem_ty = ty.elemType2();
+ // Vector elements cannot be padded since that would make
+ // @bitSizeOf(elem) * len > @bitSizeOf(vec).
+ // Neither gdb nor lldb seem to be able to display non-byte sized
+ // vectors properly.
+ const elem_di_type = switch (elem_ty.zigTypeTag()) {
+ .Int => blk: {
+ const info = elem_ty.intInfo(target);
+ assert(info.bits != 0);
+ const name = try ty.nameAlloc(gpa, o.module);
+ defer gpa.free(name);
+ const dwarf_encoding: c_uint = switch (info.signedness) {
+ .signed => DW.ATE.signed,
+ .unsigned => DW.ATE.unsigned,
+ };
+ break :blk dib.createBasicType(name, info.bits, dwarf_encoding);
+ },
+ .Bool => dib.createBasicType("bool", 1, DW.ATE.boolean),
+ else => try o.lowerDebugType(ty.childType(), .full),
+ };
+
const vector_di_ty = dib.createVectorType(
ty.abiSize(target) * 8,
ty.abiAlignment(target) * 8,
- try o.lowerDebugType(ty.childType(), .full),
+ elem_di_type,
ty.vectorLen(),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -1748,7 +1766,8 @@ pub const Object = struct {
var buf: Type.Payload.ElemType = undefined;
const child_ty = ty.optionalChild(&buf);
if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
- const di_ty = dib.createBasicType(name, 1, DW.ATE.boolean);
+ const di_bits = 8; // lldb cannot handle non-byte sized types
+ const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
@@ -1937,12 +1956,14 @@ pub const Object = struct {
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
+ assert(struct_obj.haveLayout());
const info = struct_obj.backing_int_ty.intInfo(target);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
};
- const di_ty = dib.createBasicType(name, info.bits, dwarf_encoding);
+ const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+ const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
@@ -2476,12 +2497,8 @@ pub const DeclGen = struct {
new_global.setAlignment(global.getAlignment());
if (decl.@"linksection") |section| new_global.setSection(section);
new_global.setInitializer(llvm_init);
- // replaceAllUsesWith requires the type to be unchanged. So we convert
- // the new global to the old type and use that as the thing to replace
- // old uses.
// TODO: How should this work then the address space of a global changed?
- const new_global_ptr = new_global.constBitCast(global.typeOf());
- global.replaceAllUsesWith(new_global_ptr);
+ global.replaceAllUsesWith(new_global);
dg.object.decl_map.putAssumeCapacity(decl_index, new_global);
new_global.takeName(global);
global.deleteGlobal();
@@ -2780,11 +2797,7 @@ pub const DeclGen = struct {
}
const ptr_info = t.ptrInfo().data;
const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target);
- if (ptr_info.host_size != 0) {
- return dg.context.intType(ptr_info.host_size * 8).pointerType(llvm_addrspace);
- }
- const llvm_elem_ty = try dg.lowerPtrElemTy(ptr_info.pointee_type);
- return llvm_elem_ty.pointerType(llvm_addrspace);
+ return dg.context.pointerType(llvm_addrspace);
},
.Opaque => switch (t.tag()) {
.@"opaque" => {
@@ -2948,6 +2961,7 @@ pub const DeclGen = struct {
const struct_obj = t.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
+ assert(struct_obj.haveLayout());
const int_llvm_ty = try dg.lowerType(struct_obj.backing_int_ty);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
@@ -2968,7 +2982,7 @@ pub const DeclGen = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
- var big_align: u32 = 0;
+ var big_align: u32 = 1;
var any_underaligned_fields = false;
for (struct_obj.fields.values()) |field| {
@@ -3107,8 +3121,7 @@ pub const DeclGen = struct {
defer llvm_params.deinit();
if (firstParamSRet(fn_info, target)) {
- const llvm_sret_ty = try dg.lowerType(fn_info.return_type);
- try llvm_params.append(llvm_sret_ty.pointerType(0));
+ try llvm_params.append(dg.context.pointerType(0));
}
if (fn_info.return_type.isError() and
@@ -3130,9 +3143,7 @@ pub const DeclGen = struct {
try llvm_params.append(try dg.lowerType(param_ty));
},
.byref, .byref_mut => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
- const raw_llvm_ty = try dg.lowerType(param_ty);
- try llvm_params.append(raw_llvm_ty.pointerType(0));
+ try llvm_params.append(dg.context.pointerType(0));
},
.abi_sized_int => {
const param_ty = fn_info.param_types[it.zig_index - 1];
@@ -3322,16 +3333,12 @@ pub const DeclGen = struct {
const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
- const llvm_var_type = try dg.lowerType(tv.ty);
- const llvm_actual_ptr_type = llvm_var_type.pointerType(llvm_actual_addrspace);
-
const val = try dg.resolveGlobalDecl(decl_index);
- const val_ptr = val.constBitCast(llvm_actual_ptr_type);
- if (llvm_actual_addrspace != llvm_wanted_addrspace) {
- const llvm_wanted_ptr_type = llvm_var_type.pointerType(llvm_wanted_addrspace);
- return val_ptr.constAddrSpaceCast(llvm_wanted_ptr_type);
- }
- return val_ptr;
+ const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
+ val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace))
+ else
+ val;
+ return addrspace_casted_ptr;
},
.slice => {
const slice = tv.val.castTag(.slice).?.data;
@@ -3354,12 +3361,16 @@ pub const DeclGen = struct {
return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
},
.field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
- return dg.lowerParentPtr(tv.val, tv.ty.childType());
+ return dg.lowerParentPtr(tv.val);
},
.null_value, .zero => {
const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.constNull();
},
+ .opt_payload => {
+ const payload = tv.val.castTag(.opt_payload).?.data;
+ return dg.lowerParentPtr(payload);
+ },
else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{
tv.ty.fmtDebug(), tag,
}),
@@ -3652,6 +3663,7 @@ pub const DeclGen = struct {
const struct_obj = tv.ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
+ assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(target);
const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
@@ -3881,6 +3893,32 @@ pub const DeclGen = struct {
@intCast(c_uint, llvm_elems.len),
);
},
+ .str_lit => {
+ // Note, sentinel is not stored
+ const str_lit = tv.val.castTag(.str_lit).?.data;
+ const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+ const vector_len = @intCast(usize, tv.ty.arrayLen());
+ assert(vector_len == bytes.len);
+
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
+ defer dg.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ var byte_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = bytes[i],
+ };
+
+ elem.* = try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = Value.initPayload(&byte_payload.base),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
else => unreachable,
},
@@ -3909,7 +3947,6 @@ pub const DeclGen = struct {
dg: *DeclGen,
ptr_val: Value,
decl_index: Module.Decl.Index,
- ptr_child_ty: Type,
) Error!*llvm.Value {
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
@@ -3918,62 +3955,54 @@ pub const DeclGen = struct {
.data = decl.ty,
};
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
- const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
-
- if (ptr_child_ty.eql(decl.ty, dg.module)) {
- return llvm_ptr;
- } else {
- return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0));
- }
+ return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
}
- fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, ptr_child_ty: Type) Error!*llvm.Value {
+ fn lowerParentPtr(dg: *DeclGen, ptr_val: Value) Error!*llvm.Value {
const target = dg.module.getTarget();
- var bitcast_needed: bool = undefined;
- const llvm_ptr = switch (ptr_val.tag()) {
+ switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
- return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty);
+ return dg.lowerParentPtrDecl(ptr_val, decl);
},
.decl_ref => {
const decl = ptr_val.castTag(.decl_ref).?.data;
- return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty);
+ return dg.lowerParentPtrDecl(ptr_val, decl);
},
.variable => {
const decl = ptr_val.castTag(.variable).?.data.owner_decl;
- return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty);
+ return dg.lowerParentPtrDecl(ptr_val, decl);
},
.int_i64 => {
const int = ptr_val.castTag(.int_i64).?.data;
const llvm_usize = try dg.lowerType(Type.usize);
const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False);
- return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0));
+ return llvm_int.constIntToPtr(dg.context.pointerType(0));
},
.int_u64 => {
const int = ptr_val.castTag(.int_u64).?.data;
const llvm_usize = try dg.lowerType(Type.usize);
const llvm_int = llvm_usize.constInt(int, .False);
- return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0));
+ return llvm_int.constIntToPtr(dg.context.pointerType(0));
},
- .field_ptr => blk: {
+ .field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, field_ptr.container_ty);
+ const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr);
const parent_ty = field_ptr.container_ty;
const field_index = @intCast(u32, field_ptr.field_index);
const llvm_u32 = dg.context.intType(32);
switch (parent_ty.zigTypeTag()) {
.Union => {
- bitcast_needed = true;
if (parent_ty.containerLayout() == .Packed) {
- break :blk parent_llvm_ptr;
+ return parent_llvm_ptr;
}
const layout = parent_ty.unionGetLayout(target);
if (layout.payload_size == 0) {
// In this case a pointer to the union and a pointer to any
// (void) payload is the same.
- break :blk parent_llvm_ptr;
+ return parent_llvm_ptr;
}
const llvm_pl_index = if (layout.tag_size == 0)
0
@@ -3984,10 +4013,9 @@ pub const DeclGen = struct {
llvm_u32.constInt(llvm_pl_index, .False),
};
const parent_llvm_ty = try dg.lowerType(parent_ty);
- break :blk parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
.Struct => {
- const field_ty = parent_ty.structFieldType(field_index);
if (parent_ty.containerLayout() == .Packed) {
const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth());
const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
@@ -4002,20 +4030,24 @@ pub const DeclGen = struct {
};
const byte_offset = llvm_usize.constInt(prev_bits / 8, .False);
const field_addr = base_addr.constAdd(byte_offset);
- bitcast_needed = false;
- const final_llvm_ty = (try dg.lowerType(ptr_child_ty)).pointerType(0);
- break :blk field_addr.constIntToPtr(final_llvm_ty);
+ const final_llvm_ty = dg.context.pointerType(0);
+ return field_addr.constIntToPtr(final_llvm_ty);
}
- bitcast_needed = !field_ty.eql(ptr_child_ty, dg.module);
var ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(parent_ty, field_index, target, &ty_buf).?;
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(llvm_field_index, .False),
- };
+
const parent_llvm_ty = try dg.lowerType(parent_ty);
- break :blk parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+ const indices: [2]*llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(llvm_field_index, .False),
+ };
+ return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ } else {
+ const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False);
+ const indices: [1]*llvm.Value = .{llvm_index};
+ return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ }
},
.Pointer => {
assert(parent_ty.isSlice());
@@ -4024,37 +4056,34 @@ pub const DeclGen = struct {
llvm_u32.constInt(field_index, .False),
};
const parent_llvm_ty = try dg.lowerType(parent_ty);
- break :blk parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
else => unreachable,
}
},
- .elem_ptr => blk: {
+ .elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty);
- bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module);
+ const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr);
const llvm_usize = try dg.lowerType(Type.usize);
const indices: [1]*llvm.Value = .{
llvm_usize.constInt(elem_ptr.index, .False),
};
const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty);
- break :blk elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
- .opt_payload_ptr => blk: {
+ .opt_payload_ptr => {
const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, opt_payload_ptr.container_ty);
+ const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
- bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module);
-
if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
payload_ty.optionalReprIsPayload())
{
// In this case, we represent pointer to optional the same as pointer
// to the payload.
- break :blk parent_llvm_ptr;
+ return parent_llvm_ptr;
}
const llvm_u32 = dg.context.intType(32);
@@ -4063,19 +4092,17 @@ pub const DeclGen = struct {
llvm_u32.constInt(0, .False),
};
const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty);
- break :blk opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
- .eu_payload_ptr => blk: {
+ .eu_payload_ptr => {
const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, eu_payload_ptr.container_ty);
+ const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr);
const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
- bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module);
-
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
// In this case, we represent pointer to error union the same as pointer
// to the payload.
- break :blk parent_llvm_ptr;
+ return parent_llvm_ptr;
}
const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1;
@@ -4085,14 +4112,9 @@ pub const DeclGen = struct {
llvm_u32.constInt(payload_offset, .False),
};
const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty);
- break :blk eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
else => unreachable,
- };
- if (bitcast_needed) {
- return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0));
- } else {
- return llvm_ptr;
}
}
@@ -4151,8 +4173,7 @@ pub const DeclGen = struct {
const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: {
- const llvm_decl_ty = try self.lowerType(decl.ty);
- const llvm_decl_wanted_ptr_ty = llvm_decl_ty.pointerType(llvm_wanted_addrspace);
+ const llvm_decl_wanted_ptr_ty = self.context.pointerType(llvm_wanted_addrspace);
break :blk llvm_decl_val.constAddrSpaceCast(llvm_decl_wanted_ptr_ty);
} else llvm_decl_val;
@@ -4433,18 +4454,11 @@ pub const FuncGen = struct {
global.setGlobalConstant(.True);
global.setUnnamedAddr(.True);
global.setAlignment(tv.ty.abiAlignment(target));
- // Because of LLVM limitations for lowering certain types such as unions,
- // the type of global constants might not match the type it is supposed to
- // be, and so we must bitcast the pointer at the usage sites.
- const wanted_llvm_ty = try self.dg.lowerType(tv.ty);
- const wanted_bitcasted_llvm_ptr_ty = wanted_llvm_ty.pointerType(llvm_actual_addrspace);
- const bitcasted_ptr = global.constBitCast(wanted_bitcasted_llvm_ptr_ty);
- const wanted_llvm_ptr_ty = wanted_llvm_ty.pointerType(llvm_wanted_addrspace);
- const casted_ptr = if (llvm_wanted_addrspace != llvm_actual_addrspace)
- bitcasted_ptr.constAddrSpaceCast(wanted_llvm_ptr_ty)
+ const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
+ global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
else
- bitcasted_ptr;
- return casted_ptr;
+ global;
+ return addrspace_casted_ptr;
}
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
@@ -4660,6 +4674,8 @@ pub const FuncGen = struct {
.wasm_memory_size => try self.airWasmMemorySize(inst),
.wasm_memory_grow => try self.airWasmMemoryGrow(inst),
+ .vector_store_elem => try self.airVectorStoreElem(inst),
+
.constant => unreachable,
.const_ty => unreachable,
.unreach => self.airUnreach(inst),
@@ -4670,6 +4686,11 @@ pub const FuncGen = struct {
.dbg_block_end => try self.airDbgBlockEnd(),
.dbg_var_ptr => try self.airDbgVarPtr(inst),
.dbg_var_val => try self.airDbgVarVal(inst),
+
+ .c_va_arg => try self.airCVaArg(inst),
+ .c_va_copy => try self.airCVaCopy(inst),
+ .c_va_end => try self.airCVaEnd(inst),
+ .c_va_start => try self.airCVaStart(inst),
// zig fmt: on
};
if (opt_value) |val| {
@@ -4725,17 +4746,7 @@ pub const FuncGen = struct {
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
} else {
- if (param_ty.zigTypeTag() == .Pointer) {
- // We need a bitcast in case of two possibilities:
- // 1. The parameter type is a pointer to zero-sized type,
- // which is always lowered to an LLVM type of `*i8`.
- // 2. The argument is a global which does act as a pointer, however
- // a bitcast is needed in order for the LLVM types to match.
- const casted_ptr = self.builder.buildBitCast(llvm_arg, llvm_param_ty, "");
- try llvm_args.append(casted_ptr);
- } else {
- try llvm_args.append(llvm_arg);
- }
+ try llvm_args.append(llvm_arg);
}
},
.byref => {
@@ -4779,26 +4790,22 @@ pub const FuncGen = struct {
const param_ty = self.air.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
const abi_size = @intCast(c_uint, param_ty.abiSize(target));
- const int_llvm_ty = self.dg.context.intType(abi_size * 8);
- const int_ptr_llvm_ty = int_llvm_ty.pointerType(0);
+ const int_llvm_ty = self.context.intType(abi_size * 8);
if (isByRef(param_ty)) {
const alignment = param_ty.abiAlignment(target);
- const casted_ptr = self.builder.buildBitCast(llvm_arg, int_ptr_llvm_ty, "");
- const load_inst = self.builder.buildLoad(int_llvm_ty, casted_ptr, "");
+ const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
} else {
// LLVM does not allow bitcasting structs so we must allocate
- // a local, bitcast its pointer, store, and then load.
+ // a local, store as one type, and then load as another type.
const alignment = @max(
param_ty.abiAlignment(target),
self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
const int_ptr = self.buildAlloca(int_llvm_ty, alignment);
- const param_llvm_ty = try self.dg.lowerType(param_ty);
- const casted_ptr = self.builder.buildBitCast(int_ptr, param_llvm_ty.pointerType(0), "");
- const store_inst = self.builder.buildStore(llvm_arg, casted_ptr);
+ const store_inst = self.builder.buildStore(llvm_arg, int_ptr);
store_inst.setAlignment(alignment);
const load_inst = self.builder.buildLoad(int_llvm_ty, int_ptr, "");
load_inst.setAlignment(alignment);
@@ -4827,12 +4834,11 @@ pub const FuncGen = struct {
break :p p;
};
- const llvm_ty = self.dg.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
- const casted_ptr = self.builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), "");
+ const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
for (llvm_types) |field_ty, i_usize| {
const i = @intCast(c_uint, i_usize);
- const field_ptr = self.builder.buildStructGEP(llvm_ty, casted_ptr, i, "");
+ const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
llvm_args.appendAssumeCapacity(load_inst);
@@ -4841,7 +4847,7 @@ pub const FuncGen = struct {
.as_u16 => {
const arg = args[it.zig_index - 1];
const llvm_arg = try self.resolveInst(arg);
- const casted = self.builder.buildBitCast(llvm_arg, self.dg.context.intType(16), "");
+ const casted = self.builder.buildBitCast(llvm_arg, self.context.intType(16), "");
try llvm_args.append(casted);
},
.float_array => |count| {
@@ -4858,9 +4864,8 @@ pub const FuncGen = struct {
const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?);
const array_llvm_ty = float_ty.arrayType(count);
- const casted = self.builder.buildBitCast(llvm_arg, array_llvm_ty.pointerType(0), "");
const alignment = arg_ty.abiAlignment(target);
- const load_inst = self.builder.buildLoad(array_llvm_ty, casted, "");
+ const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
},
@@ -4876,10 +4881,9 @@ pub const FuncGen = struct {
llvm_arg = store_inst;
}
- const array_llvm_ty = self.dg.context.intType(elem_size).arrayType(arr_len);
- const casted = self.builder.buildBitCast(llvm_arg, array_llvm_ty.pointerType(0), "");
+ const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len);
const alignment = arg_ty.abiAlignment(target);
- const load_inst = self.builder.buildLoad(array_llvm_ty, casted, "");
+ const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
},
@@ -4983,12 +4987,10 @@ pub const FuncGen = struct {
if (abi_ret_ty != llvm_ret_ty) {
// In this case the function return type is honoring the calling convention by having
// a different LLVM type than the usual one. We solve this here at the callsite
- // by bitcasting a pointer to our canonical type, then loading it if necessary.
+ // by using our canonical type, then loading it if necessary.
const alignment = self.dg.object.target_data.abiAlignmentOfType(abi_ret_ty);
const rp = self.buildAlloca(llvm_ret_ty, alignment);
- const ptr_abi_ty = abi_ret_ty.pointerType(0);
- const casted_ptr = self.builder.buildBitCast(rp, ptr_abi_ty, "");
- const store_inst = self.builder.buildStore(call, casted_ptr);
+ const store_inst = self.builder.buildStore(call, rp);
store_inst.setAlignment(alignment);
if (isByRef(return_type)) {
return rp;
@@ -5022,7 +5024,7 @@ pub const FuncGen = struct {
.data = ret_ty,
};
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
- self.store(ret_ptr, ptr_ty, operand, .NotAtomic);
+ try self.store(ret_ptr, ptr_ty, operand, .NotAtomic);
_ = self.builder.buildRetVoid();
return null;
}
@@ -5041,7 +5043,6 @@ pub const FuncGen = struct {
}
const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
- const ptr_abi_ty = abi_ret_ty.pointerType(0);
const operand = try self.resolveInst(un_op);
const target = self.dg.module.getTarget();
const alignment = ret_ty.abiAlignment(target);
@@ -5049,8 +5050,7 @@ pub const FuncGen = struct {
if (isByRef(ret_ty)) {
// operand is a pointer however self.ret_ptr is null so that means
// we need to return a value.
- const casted_ptr = self.builder.buildBitCast(operand, ptr_abi_ty, "");
- const load_inst = self.builder.buildLoad(abi_ret_ty, casted_ptr, "");
+ const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
load_inst.setAlignment(alignment);
_ = self.builder.buildRet(load_inst);
return null;
@@ -5065,8 +5065,7 @@ pub const FuncGen = struct {
const rp = self.buildAlloca(llvm_ret_ty, alignment);
const store_inst = self.builder.buildStore(operand, rp);
store_inst.setAlignment(alignment);
- const casted_ptr = self.builder.buildBitCast(rp, ptr_abi_ty, "");
- const load_inst = self.builder.buildLoad(abi_ret_ty, casted_ptr, "");
+ const load_inst = self.builder.buildLoad(abi_ret_ty, rp, "");
load_inst.setAlignment(alignment);
_ = self.builder.buildRet(load_inst);
return null;
@@ -5096,17 +5095,100 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(un_op);
const target = self.dg.module.getTarget();
const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
- const llvm_ret_ty = try self.dg.lowerType(ret_ty);
- const casted_ptr = if (abi_ret_ty == llvm_ret_ty) ptr else p: {
- const ptr_abi_ty = abi_ret_ty.pointerType(0);
- break :p self.builder.buildBitCast(ptr, ptr_abi_ty, "");
- };
- const loaded = self.builder.buildLoad(abi_ret_ty, casted_ptr, "");
+ const loaded = self.builder.buildLoad(abi_ret_ty, ptr, "");
loaded.setAlignment(ret_ty.abiAlignment(target));
_ = self.builder.buildRet(loaded);
return null;
}
+ fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const list = try self.resolveInst(ty_op.operand);
+ const arg_ty = self.air.getRefType(ty_op.ty);
+ const llvm_arg_ty = try self.dg.lowerType(arg_ty);
+
+ return self.builder.buildVAArg(list, llvm_arg_ty, "");
+ }
+
+ fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const src_list = try self.resolveInst(ty_op.operand);
+ const va_list_ty = self.air.getRefType(ty_op.ty);
+ const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+
+ const target = self.dg.module.getTarget();
+ const result_alignment = va_list_ty.abiAlignment(target);
+ const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);
+
+ const llvm_fn_name = "llvm.va_copy";
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const param_types = [_]*llvm.Type{
+ self.context.pointerType(0),
+ self.context.pointerType(0),
+ };
+ const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+
+ const args: [2]*llvm.Value = .{ dest_list, src_list };
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+
+ if (isByRef(va_list_ty)) {
+ return dest_list;
+ } else {
+ const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
+ loaded.setAlignment(result_alignment);
+ return loaded;
+ }
+ }
+
+ fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const list = try self.resolveInst(un_op);
+
+ const llvm_fn_name = "llvm.va_end";
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const param_types = [_]*llvm.Type{self.context.pointerType(0)};
+ const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+ const args: [1]*llvm.Value = .{list};
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ return null;
+ }
+
+ fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const va_list_ty = self.air.typeOfIndex(inst);
+ const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+
+ const target = self.dg.module.getTarget();
+ const result_alignment = va_list_ty.abiAlignment(target);
+ const list = self.buildAlloca(llvm_va_list_ty, result_alignment);
+
+ const llvm_fn_name = "llvm.va_start";
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const param_types = [_]*llvm.Type{self.context.pointerType(0)};
+ const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+ const args: [1]*llvm.Value = .{list};
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+
+ if (isByRef(va_list_ty)) {
+ return list;
+ } else {
+ const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
+ loaded.setAlignment(result_alignment);
+ return loaded;
+ }
+ }
+
fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -5284,7 +5366,7 @@ pub const FuncGen = struct {
// of function pointers, however the phi makes it a runtime value and therefore
// the LLVM type has to be wrapped in a pointer.
if (is_body or isByRef(inst_ty)) {
- break :ty raw_llvm_ty.pointerType(0);
+ break :ty self.context.pointerType(0);
}
break :ty raw_llvm_ty;
};
@@ -5349,7 +5431,7 @@ pub const FuncGen = struct {
const payload_ty = self.air.typeOfIndex(inst);
const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
const is_unused = self.liveness.isUnused(inst);
- return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused, payload_ty);
+ return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
@@ -5358,9 +5440,8 @@ pub const FuncGen = struct {
const err_union_ptr = try self.resolveInst(extra.data.ptr);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
- const payload_ty = self.air.typeOfIndex(inst);
const is_unused = self.liveness.isUnused(inst);
- return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused, payload_ty);
+ return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused);
}
fn lowerTry(
@@ -5371,7 +5452,6 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
can_elide_load: bool,
is_unused: bool,
- result_ty: Type,
) !?*llvm.Value {
const payload_ty = err_union_ty.errorUnionPayload();
const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
@@ -5414,12 +5494,7 @@ pub const FuncGen = struct {
return null;
}
if (!payload_has_bits) {
- if (!operand_is_ptr) return null;
-
- // TODO once we update to an LLVM version with opaque pointers
- // this bitcast won't be necessary.
- const res_ptr_ty = try fg.dg.lowerType(result_ty);
- return fg.builder.buildBitCast(err_union, res_ptr_ty, "");
+ return if (operand_is_ptr) err_union else null;
}
const offset = errUnionPayloadOffset(payload_ty, target);
if (operand_is_ptr) {
@@ -5779,6 +5854,10 @@ pub const FuncGen = struct {
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
+
+ const elem_ptr = self.air.getRefType(ty_pl.ty);
+ if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr;
+
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
if (ptr_ty.isSinglePointer()) {
// If this is a single-item pointer to an array, we need another index in the GEP.
@@ -5840,7 +5919,7 @@ pub const FuncGen = struct {
const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try self.dg.lowerType(field_ty);
- if (field_ty.zigTypeTag() == .Float) {
+ if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
@@ -5863,7 +5942,7 @@ pub const FuncGen = struct {
assert(struct_ty.containerLayout() == .Packed);
const containing_int = struct_llvm_val;
const elem_llvm_ty = try self.dg.lowerType(field_ty);
- if (field_ty.zigTypeTag() == .Float) {
+ if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
@@ -5901,9 +5980,8 @@ pub const FuncGen = struct {
const union_llvm_ty = try self.dg.lowerType(struct_ty);
const layout = struct_ty.unionGetLayout(target);
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
- const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
+ const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
const llvm_field_ty = try self.dg.lowerType(field_ty);
- const field_ptr = self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), "");
if (isByRef(field_ty)) {
if (canElideLoad(self, body_tail))
return field_ptr;
@@ -5931,7 +6009,7 @@ pub const FuncGen = struct {
const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
if (field_offset == 0) {
- return self.builder.buildBitCast(field_ptr, res_ty, "");
+ return field_ptr;
}
const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
@@ -6498,7 +6576,7 @@ pub const FuncGen = struct {
self.builder.buildLoad(optional_llvm_ty, operand, "")
else
operand;
- const llvm_i8 = self.dg.context.intType(8);
+ const llvm_i8 = self.context.intType(8);
return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), "");
}
@@ -6564,16 +6642,12 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand).childType();
- const result_ty = self.air.getRefType(ty_op.ty);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
-
- // TODO once we update to LLVM 16 this bitcast won't be necessary.
- const res_ptr_ty = try self.dg.lowerType(result_ty);
- return self.builder.buildBitCast(operand, res_ptr_ty, "");
+ return operand;
}
if (optional_ty.optionalReprIsPayload()) {
// The payload and the optional are the same value.
@@ -6589,17 +6663,13 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand).childType();
- const result_ty = self.air.getRefType(ty_op.ty);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
const non_null_bit = self.context.intType(8).constInt(1, .False);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
// We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
_ = self.builder.buildStore(non_null_bit, operand);
-
- // TODO once we update to LLVM 16 this bitcast won't be necessary.
- const res_ptr_ty = try self.dg.lowerType(result_ty);
- return self.builder.buildBitCast(operand, res_ptr_ty, "");
+ return operand;
}
if (optional_ty.optionalReprIsPayload()) {
// The payload and the optional are the same value.
@@ -6657,11 +6727,7 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- if (!operand_is_ptr) return null;
-
- // TODO once we update to LLVM 14 this bitcast won't be necessary.
- const res_ptr_ty = try self.dg.lowerType(result_ty);
- return self.builder.buildBitCast(operand, res_ptr_ty, "");
+ return if (operand_is_ptr) operand else null;
}
const offset = errUnionPayloadOffset(payload_ty, target);
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
@@ -6697,7 +6763,7 @@ pub const FuncGen = struct {
if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
const err_llvm_ty = try self.dg.lowerType(Type.anyerror);
if (operand_is_ptr) {
- return self.builder.buildBitCast(operand, err_llvm_ty.pointerType(0), "");
+ return operand;
} else {
return err_llvm_ty.constInt(0, .False);
}
@@ -6803,7 +6869,7 @@ pub const FuncGen = struct {
.data = payload_ty,
};
const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
- self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
+ try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, "");
_ = self.builder.buildStore(non_null_bit, non_null_ptr);
return optional_ptr;
@@ -6839,7 +6905,7 @@ pub const FuncGen = struct {
.data = payload_ty,
};
const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
- self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
+ try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
return result_ptr;
}
@@ -6908,6 +6974,28 @@ pub const FuncGen = struct {
return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
}
+ fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const data = self.air.instructions.items(.data)[inst].vector_store_elem;
+ const extra = self.air.extraData(Air.Bin, data.payload).data;
+
+ const vector_ptr = try self.resolveInst(data.vector_ptr);
+ const vector_ptr_ty = self.air.typeOf(data.vector_ptr);
+ const index = try self.resolveInst(extra.lhs);
+ const operand = try self.resolveInst(extra.rhs);
+
+ const loaded_vector = blk: {
+ const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.elemType2());
+ const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, "");
+ const target = self.dg.module.getTarget();
+ load_inst.setAlignment(vector_ptr_ty.ptrAlignment(target));
+ load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr()));
+ break :blk load_inst;
+ };
+ const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, "");
+ try self.store(vector_ptr, vector_ptr_ty, modified_vector, .NotAtomic);
+ return null;
+ }
+
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -6945,14 +7033,8 @@ pub const FuncGen = struct {
const llvm_slice_ty = try self.dg.lowerType(inst_ty);
// In case of slicing a global, the result type looks something like `{ i8*, i64 }`
- // but `ptr` is pointing to the global directly. If it's an array, we would want to
- // do GEP(0,0), or we can just bitcast it to be correct, like we do here.
- // This prevents an assertion failure.
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = inst_ty.slicePtrFieldType(&buf);
- const ptr_llvm_ty = try self.dg.lowerType(ptr_ty);
- const casted_ptr = self.builder.buildBitCast(ptr, ptr_llvm_ty, "");
- const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), casted_ptr, 0, "");
+ // but `ptr` is pointing to the global directly.
+ const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, "");
return self.builder.buildInsertValue(partial, len, 1, "");
}
@@ -7477,7 +7559,7 @@ pub const FuncGen = struct {
.neg => {
// In this case we can generate a softfloat negation by XORing the
// bits with a constant.
- const int_llvm_ty = self.dg.context.intType(float_bits);
+ const int_llvm_ty = self.context.intType(float_bits);
const one = int_llvm_ty.constInt(1, .False);
const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
const sign_mask = one.constShl(shift_amt);
@@ -7886,8 +7968,8 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
if (operand_is_ref and result_is_ref) {
- // They are both pointers; just do a bitcast on the pointers :)
- return self.builder.buildBitCast(operand, llvm_dest_ty.pointerType(0), "");
+ // They are both pointers, so just return the same opaque pointer :)
+ return operand;
}
if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) {
@@ -7902,9 +7984,7 @@ pub const FuncGen = struct {
const array_ptr = self.buildAlloca(llvm_dest_ty, null);
const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
if (bitcast_ok) {
- const llvm_vector_ty = try self.dg.lowerType(operand_ty);
- const casted_ptr = self.builder.buildBitCast(array_ptr, llvm_vector_ty.pointerType(0), "");
- const llvm_store = self.builder.buildStore(operand, casted_ptr);
+ const llvm_store = self.builder.buildStore(operand, array_ptr);
llvm_store.setAlignment(inst_ty.abiAlignment(target));
} else {
// If the ABI size of the element type is not evenly divisible by size in bits;
@@ -7933,9 +8013,7 @@ pub const FuncGen = struct {
const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
if (bitcast_ok) {
- const llvm_vector_ptr_ty = llvm_vector_ty.pointerType(0);
- const casted_ptr = self.builder.buildBitCast(operand, llvm_vector_ptr_ty, "");
- const vector = self.builder.buildLoad(llvm_vector_ty, casted_ptr, "");
+ const vector = self.builder.buildLoad(llvm_vector_ty, operand, "");
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
vector.setAlignment(elem_ty.abiAlignment(target));
@@ -7965,20 +8043,15 @@ pub const FuncGen = struct {
}
if (operand_is_ref) {
- // Bitcast the operand pointer, then load.
- const casted_ptr = self.builder.buildBitCast(operand, llvm_dest_ty.pointerType(0), "");
- const load_inst = self.builder.buildLoad(llvm_dest_ty, casted_ptr, "");
+ const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, "");
load_inst.setAlignment(operand_ty.abiAlignment(target));
return load_inst;
}
if (result_is_ref) {
- // Bitcast the result pointer, then store.
const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
- const operand_llvm_ty = try self.dg.lowerType(operand_ty);
- const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), "");
- const store_inst = self.builder.buildStore(operand, casted_ptr);
+ const store_inst = self.builder.buildStore(operand, result_ptr);
store_inst.setAlignment(alignment);
return result_ptr;
}
@@ -7986,12 +8059,10 @@ pub const FuncGen = struct {
if (llvm_dest_ty.getTypeKind() == .Struct) {
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values.
- // Therefore, we store operand to bitcasted alloca, then load for result.
+ // Therefore, we store operand to alloca, then load for result.
const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
- const operand_llvm_ty = try self.dg.lowerType(operand_ty);
- const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), "");
- const store_inst = self.builder.buildStore(operand, casted_ptr);
+ const store_inst = self.builder.buildStore(operand, result_ptr);
store_inst.setAlignment(alignment);
const load_inst = self.builder.buildLoad(llvm_dest_ty, result_ptr, "");
load_inst.setAlignment(alignment);
@@ -8020,7 +8091,7 @@ pub const FuncGen = struct {
return arg_val;
}
- const src_index = self.getSrcArgIndex(self.arg_index - 1);
+ const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
const func = self.dg.decl.getFunction().?;
const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
@@ -8053,16 +8124,6 @@ pub const FuncGen = struct {
return arg_val;
}
- fn getSrcArgIndex(self: *FuncGen, runtime_index: u32) u32 {
- const fn_info = self.dg.decl.ty.fnInfo();
- var i: u32 = 0;
- for (fn_info.param_types) |param_ty, src_index| {
- if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue;
- if (i == runtime_index) return @intCast(u32, src_index);
- i += 1;
- } else unreachable;
- }
-
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
@@ -8089,7 +8150,7 @@ pub const FuncGen = struct {
/// Use this instead of builder.buildAlloca, because this function makes sure to
/// put the alloca instruction at the top of the function!
fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type, alignment: ?c_uint) *llvm.Value {
- return buildAllocaInner(self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, self.dg.module.getTarget());
+ return buildAllocaInner(self.context, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, self.dg.module.getTarget());
}
fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
@@ -8123,19 +8184,17 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
const operand_size = operand_ty.abiSize(target);
const u8_llvm_ty = self.context.intType(8);
- const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
- const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
const fill_char = u8_llvm_ty.constInt(0xaa, .False);
const dest_ptr_align = ptr_ty.ptrAlignment(target);
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const len = usize_llvm_ty.constInt(operand_size, .False);
- _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
+ _ = self.builder.buildMemSet(dest_ptr, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
if (self.dg.module.comp.bin_file.options.valgrind) {
self.valgrindMarkUndef(dest_ptr, len);
}
} else {
const src_operand = try self.resolveInst(bin_op.rhs);
- self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic);
+ try self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic);
}
return null;
}
@@ -8206,7 +8265,7 @@ pub const FuncGen = struct {
const llvm_i32 = self.context.intType(32);
const llvm_fn_name = "llvm.frameaddress.p0";
const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- const llvm_p0i8 = self.context.intType(8).pointerType(0);
+ const llvm_p0i8 = self.context.pointerType(0);
const param_types = [_]*llvm.Type{llvm_i32};
const fn_type = llvm.functionType(llvm_p0i8, &param_types, param_types.len, .False);
break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
@@ -8229,14 +8288,13 @@ pub const FuncGen = struct {
fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
- var ptr = try self.resolveInst(extra.ptr);
+ const ptr = try self.resolveInst(extra.ptr);
var expected_value = try self.resolveInst(extra.expected_value);
var new_value = try self.resolveInst(extra.new_value);
const operand_ty = self.air.typeOf(extra.ptr).elemType();
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening and truncating
- ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
if (operand_ty.isSignedInt()) {
expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
new_value = self.builder.buildSExt(new_value, abi_ty, "");
@@ -8288,7 +8346,6 @@ pub const FuncGen = struct {
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, op == .Xchg);
if (opt_abi_ty) |abi_ty| {
// operand needs widening and truncating or bitcasting.
- const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
const casted_operand = if (is_float)
self.builder.buildBitCast(operand, abi_ty, "")
else if (is_signed_int)
@@ -8298,7 +8355,7 @@ pub const FuncGen = struct {
const uncasted_result = self.builder.buildAtomicRmw(
op,
- casted_ptr,
+ ptr,
casted_operand,
ordering,
single_threaded,
@@ -8317,11 +8374,10 @@ pub const FuncGen = struct {
// It's a pointer but we need to treat it as an int.
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
- const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), "");
const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, "");
const uncasted_result = self.builder.buildAtomicRmw(
op,
- casted_ptr,
+ ptr,
casted_operand,
ordering,
single_threaded,
@@ -8349,8 +8405,7 @@ pub const FuncGen = struct {
if (opt_abi_llvm_ty) |abi_llvm_ty| {
// operand needs widening and truncating
- const casted_ptr = self.builder.buildBitCast(ptr, abi_llvm_ty.pointerType(0), "");
- const load_inst = self.builder.buildLoad(abi_llvm_ty, casted_ptr, "");
+ const load_inst = self.builder.buildLoad(abi_llvm_ty, ptr, "");
load_inst.setAlignment(ptr_alignment);
load_inst.setVolatile(ptr_volatile);
load_inst.setOrdering(ordering);
@@ -8372,20 +8427,19 @@ pub const FuncGen = struct {
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
- var ptr = try self.resolveInst(bin_op.lhs);
+ const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening
- ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
if (operand_ty.isSignedInt()) {
element = self.builder.buildSExt(element, abi_ty, "");
} else {
element = self.builder.buildZExt(element, abi_ty, "");
}
}
- self.store(ptr, ptr_ty, element, ordering);
+ try self.store(ptr, ptr_ty, element, ordering);
return null;
}
@@ -8398,15 +8452,13 @@ pub const FuncGen = struct {
const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndefDeep() else false;
const len = try self.resolveInst(extra.rhs);
const u8_llvm_ty = self.context.intType(8);
- const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
- const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value;
const target = self.dg.module.getTarget();
const dest_ptr_align = ptr_ty.ptrAlignment(target);
- _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
+ _ = self.builder.buildMemSet(dest_ptr, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) {
- self.valgrindMarkUndef(dest_ptr_u8, len);
+ self.valgrindMarkUndef(dest_ptr, len);
}
return null;
}
@@ -8419,15 +8471,12 @@ pub const FuncGen = struct {
const src_ptr = try self.resolveInst(extra.lhs);
const src_ptr_ty = self.air.typeOf(extra.lhs);
const len = try self.resolveInst(extra.rhs);
- const llvm_ptr_u8 = self.context.intType(8).pointerType(0);
- const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, llvm_ptr_u8, "");
- const src_ptr_u8 = self.builder.buildBitCast(src_ptr, llvm_ptr_u8, "");
const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
const target = self.dg.module.getTarget();
_ = self.builder.buildMemCpy(
- dest_ptr_u8,
+ dest_ptr,
dest_ptr_ty.ptrAlignment(target),
- src_ptr_u8,
+ src_ptr,
src_ptr_ty.ptrAlignment(target),
len,
is_volatile,
@@ -8621,8 +8670,8 @@ pub const FuncGen = struct {
const error_set_ty = self.air.getRefType(ty_op.ty);
const names = error_set_ty.errorSetNames();
- const valid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Valid");
- const invalid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Invalid");
+ const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid");
+ const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid");
const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
@@ -8648,7 +8697,7 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(end_block);
- const llvm_type = self.dg.context.intType(1);
+ const llvm_type = self.context.intType(1);
const incoming_values: [2]*llvm.Value = .{
llvm_type.constInt(1, .False), llvm_type.constInt(0, .False),
};
@@ -8710,13 +8759,13 @@ pub const FuncGen = struct {
}
}
- const entry_block = self.dg.context.appendBasicBlock(fn_val, "Entry");
+ const entry_block = self.context.appendBasicBlock(fn_val, "Entry");
self.builder.positionBuilderAtEnd(entry_block);
self.builder.clearCurrentDebugLocation();
const fields = enum_ty.enumFields();
- const named_block = self.dg.context.appendBasicBlock(fn_val, "Named");
- const unnamed_block = self.dg.context.appendBasicBlock(fn_val, "Unnamed");
+ const named_block = self.context.appendBasicBlock(fn_val, "Named");
+ const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
@@ -8734,10 +8783,10 @@ pub const FuncGen = struct {
switch_instr.addCase(this_tag_int_value, named_block);
}
self.builder.positionBuilderAtEnd(named_block);
- _ = self.builder.buildRet(self.dg.context.intType(1).constInt(1, .False));
+ _ = self.builder.buildRet(self.context.intType(1).constInt(1, .False));
self.builder.positionBuilderAtEnd(unnamed_block);
- _ = self.builder.buildRet(self.dg.context.intType(1).constInt(0, .False));
+ _ = self.builder.buildRet(self.context.intType(1).constInt(0, .False));
return fn_val;
}
@@ -8796,12 +8845,12 @@ pub const FuncGen = struct {
}
}
- const entry_block = self.dg.context.appendBasicBlock(fn_val, "Entry");
+ const entry_block = self.context.appendBasicBlock(fn_val, "Entry");
self.builder.positionBuilderAtEnd(entry_block);
self.builder.clearCurrentDebugLocation();
const fields = enum_ty.enumFields();
- const bad_value_block = self.dg.context.appendBasicBlock(fn_val, "BadValue");
+ const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count()));
@@ -8810,7 +8859,7 @@ pub const FuncGen = struct {
};
for (fields.keys()) |name, field_index| {
- const str_init = self.dg.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
+ const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_init_llvm_ty = str_init.typeOf();
const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
str_global.setInitializer(str_init);
@@ -8831,7 +8880,7 @@ pub const FuncGen = struct {
slice_global.setUnnamedAddr(.True);
slice_global.setAlignment(slice_alignment);
- const return_block = self.dg.context.appendBasicBlock(fn_val, "Name");
+ const return_block = self.context.appendBasicBlock(fn_val, "Name");
const this_tag_int_value = int: {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
@@ -8883,7 +8932,7 @@ pub const FuncGen = struct {
const slice_llvm_ty = try self.dg.lowerType(slice_ty);
const error_name_table_ptr = try self.getErrorNameTable();
- const ptr_slice_llvm_ty = slice_llvm_ty.pointerType(0);
+ const ptr_slice_llvm_ty = self.context.pointerType(0);
const error_name_table = self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr, "");
const indices = [_]*llvm.Value{operand};
const error_name_ptr = self.builder.buildInBoundsGEP(slice_llvm_ty, error_name_table, &indices, indices.len, "");
@@ -9125,8 +9174,9 @@ pub const FuncGen = struct {
.Struct => {
if (result_ty.containerLayout() == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data;
+ assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(target);
- const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits));
+ const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
@@ -9137,7 +9187,7 @@ pub const FuncGen = struct {
const non_int_val = try self.resolveInst(elem);
const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
- const small_int_ty = self.dg.context.intType(ty_bit_size);
+ const small_int_ty = self.context.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime())
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
else
@@ -9178,7 +9228,7 @@ pub const FuncGen = struct {
},
};
const field_ptr_ty = Type.initPayload(&field_ptr_payload.base);
- self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic);
+ try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic);
}
return alloca_inst;
@@ -9216,7 +9266,7 @@ pub const FuncGen = struct {
};
const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
const llvm_elem = try self.resolveInst(elem);
- self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic);
+ try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic);
}
if (array_info.sentinel) |sent_val| {
const indices: [2]*llvm.Value = .{
@@ -9229,7 +9279,7 @@ pub const FuncGen = struct {
.val = sent_val,
});
- self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic);
+ try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic);
}
return alloca_inst;
@@ -9251,11 +9301,11 @@ pub const FuncGen = struct {
if (union_obj.layout == .Packed) {
const big_bits = union_ty.bitSize(target);
- const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits));
+ const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
const field = union_obj.fields.values()[extra.field_index];
const non_int_val = try self.resolveInst(extra.init);
const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
- const small_int_ty = self.dg.context.intType(ty_bit_size);
+ const small_int_ty = self.context.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime())
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
else
@@ -9285,9 +9335,9 @@ pub const FuncGen = struct {
}
assert(isByRef(union_ty));
// The llvm type of the alloca will be the named LLVM union type, and will not
- // necessarily match the format that we need, depending on which tag is active. We
- // must construct the correct unnamed struct type here and bitcast, in order to
- // then set the fields appropriately.
+ // necessarily match the format that we need, depending on which tag is active.
+ // We must construct the correct unnamed struct type here, in order to then set
+ // the fields appropriately.
const result_ptr = self.buildAlloca(union_llvm_ty, layout.abi_align);
const llvm_payload = try self.resolveInst(extra.init);
assert(union_obj.haveFieldTypes());
@@ -9330,8 +9380,6 @@ pub const FuncGen = struct {
break :t self.context.structType(&fields, fields_len, .False);
};
- const casted_ptr = self.builder.buildBitCast(result_ptr, llvm_union_ty.pointerType(0), "");
-
// Now we follow the layout as expressed above with GEP instructions to set the
// tag and the payload.
const index_type = self.context.intType(32);
@@ -9351,8 +9399,8 @@ pub const FuncGen = struct {
index_type.constNull(),
};
const len: c_uint = if (field_size == layout.payload_size) 2 else 3;
- const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, casted_ptr, &indices, len, "");
- self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic);
+ const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, "");
+ try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic);
return result_ptr;
}
@@ -9363,15 +9411,15 @@ pub const FuncGen = struct {
index_type.constNull(),
};
const len: c_uint = if (field_size == layout.payload_size) 2 else 3;
- const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, casted_ptr, &indices, len, "");
- self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic);
+ const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, "");
+ try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic);
}
{
const indices: [2]*llvm.Value = .{
index_type.constNull(),
index_type.constInt(@boolToInt(layout.tag_align < layout.payload_align), .False),
};
- const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, casted_ptr, &indices, indices.len, "");
+ const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, "");
const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty);
const llvm_tag = tag_llvm_ty.constInt(tag_int, .False);
const store_inst = self.builder.buildStore(llvm_tag, field_ptr);
@@ -9420,8 +9468,7 @@ pub const FuncGen = struct {
.data => {},
}
- const llvm_u8 = self.context.intType(8);
- const llvm_ptr_u8 = llvm_u8.pointerType(0);
+ const llvm_ptr_u8 = self.context.pointerType(0);
const llvm_u32 = self.context.intType(32);
const llvm_fn_name = "llvm.prefetch.p0";
@@ -9436,10 +9483,9 @@ pub const FuncGen = struct {
};
const ptr = try self.resolveInst(prefetch.ptr);
- const ptr_u8 = self.builder.buildBitCast(ptr, llvm_ptr_u8, "");
const params = [_]*llvm.Value{
- ptr_u8,
+ ptr,
llvm_u32.constInt(@enumToInt(prefetch.rw), .False),
llvm_u32.constInt(prefetch.locality, .False),
llvm_u32.constInt(@enumToInt(prefetch.cache), .False),
@@ -9466,8 +9512,7 @@ pub const FuncGen = struct {
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget());
- const llvm_slice_ty = try self.dg.lowerType(slice_ty);
- const llvm_slice_ptr_ty = llvm_slice_ty.pointerType(0); // TODO: Address space
+ const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space
const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
error_name_table_global.setInitializer(llvm_slice_ptr_ty.getUndef());
@@ -9540,7 +9585,7 @@ pub const FuncGen = struct {
non_null_bit: *llvm.Value,
) !?*llvm.Value {
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
- const non_null_field = self.builder.buildZExt(non_null_bit, self.dg.context.intType(8), "");
+ const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), "");
if (isByRef(optional_ty)) {
const target = self.dg.module.getTarget();
@@ -9581,31 +9626,24 @@ pub const FuncGen = struct {
.Packed => {
const result_ty = self.air.typeOfIndex(inst);
const result_ty_info = result_ty.ptrInfo().data;
- const result_llvm_ty = try self.dg.lowerType(result_ty);
if (result_ty_info.host_size != 0) {
// From LLVM's perspective, a pointer to a packed struct and a pointer
// to a field of a packed struct are the same. The difference is in the
// Zig pointer type which provides information for how to mask and shift
// out the relevant bits when accessing the pointee.
- // Here we perform a bitcast because we want to use the host_size
- // as the llvm pointer element type.
- return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
+ return struct_ptr;
}
// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target);
- if (byte_offset == 0) {
- return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
- }
+ if (byte_offset == 0) return struct_ptr;
const byte_llvm_ty = self.context.intType(8);
- const ptr_as_bytes = self.builder.buildBitCast(struct_ptr, byte_llvm_ty.pointerType(0), "");
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_index = llvm_usize.constInt(byte_offset, .False);
const indices: [1]*llvm.Value = .{llvm_index};
- const new_ptr = self.builder.buildInBoundsGEP(byte_llvm_ty, ptr_as_bytes, &indices, indices.len, "");
- return self.builder.buildBitCast(new_ptr, result_llvm_ty, "");
+ return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, "");
},
else => {
const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty);
@@ -9618,39 +9656,25 @@ pub const FuncGen = struct {
// end of the struct. Treat our struct pointer as an array of two and get
// the index to the element at index `1` to get a pointer to the end of
// the struct.
- const llvm_usize = try self.dg.lowerType(Type.usize);
- const llvm_index = llvm_usize.constInt(1, .False);
+ const llvm_u32 = self.context.intType(32);
+ const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False);
const indices: [1]*llvm.Value = .{llvm_index};
return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, "");
}
},
},
- .Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty),
+ .Union => {
+ const layout = struct_ty.unionGetLayout(target);
+ if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr;
+ const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
+ const union_llvm_ty = try self.dg.lowerType(struct_ty);
+ const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, "");
+ return union_field_ptr;
+ },
else => unreachable,
}
}
- fn unionFieldPtr(
- self: *FuncGen,
- inst: Air.Inst.Index,
- union_ptr: *llvm.Value,
- union_ty: Type,
- ) !?*llvm.Value {
- const target = self.dg.module.getTarget();
- const layout = union_ty.unionGetLayout(target);
- const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
- if (layout.payload_size == 0) {
- return self.builder.buildBitCast(union_ptr, result_llvm_ty, "");
- }
- if (union_ty.containerLayout() == .Packed) {
- return self.builder.buildBitCast(union_ptr, result_llvm_ty, "");
- }
- const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
- const union_llvm_ty = try self.dg.lowerType(union_ty);
- const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, union_ptr, payload_index, "");
- return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
- }
-
fn getIntrinsic(self: *FuncGen, name: []const u8, types: []const *llvm.Type) *llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
@@ -9669,13 +9693,12 @@ pub const FuncGen = struct {
const target = fg.dg.module.getTarget();
const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target));
const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align);
- const llvm_ptr_u8 = fg.context.intType(8).pointerType(0);
const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits);
const size_bytes = pointee_type.abiSize(target);
_ = fg.builder.buildMemCpy(
- fg.builder.buildBitCast(result_ptr, llvm_ptr_u8, ""),
+ result_ptr,
result_align,
- fg.builder.buildBitCast(ptr, llvm_ptr_u8, ""),
+ ptr,
ptr_alignment,
llvm_usize.constInt(size_bytes, .False),
is_volatile,
@@ -9693,6 +9716,20 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
const ptr_alignment = info.alignment(target);
const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr());
+
+ assert(info.vector_index != .runtime);
+ if (info.vector_index != .none) {
+ const index_u32 = self.context.intType(32).constInt(@enumToInt(info.vector_index), .False);
+ const vec_elem_ty = try self.dg.lowerType(info.pointee_type);
+ const vec_ty = vec_elem_ty.vectorType(info.host_size);
+
+ const loaded_vector = self.builder.buildLoad(vec_ty, ptr, "");
+ loaded_vector.setAlignment(ptr_alignment);
+ loaded_vector.setVolatile(ptr_volatile);
+
+ return self.builder.buildExtractElement(loaded_vector, index_u32, "");
+ }
+
if (info.host_size == 0) {
if (isByRef(info.pointee_type)) {
return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile");
@@ -9705,8 +9742,7 @@ pub const FuncGen = struct {
}
const int_elem_ty = self.context.intType(info.host_size * 8);
- const int_ptr = self.builder.buildBitCast(ptr, int_elem_ty.pointerType(0), "");
- const containing_int = self.builder.buildLoad(int_elem_ty, int_ptr, "");
+ const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
@@ -9721,13 +9757,12 @@ pub const FuncGen = struct {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
- const bitcasted_ptr = self.builder.buildBitCast(result_ptr, same_size_int.pointerType(0), "");
- const store_inst = self.builder.buildStore(truncated_int, bitcasted_ptr);
+ const store_inst = self.builder.buildStore(truncated_int, result_ptr);
store_inst.setAlignment(result_align);
return result_ptr;
}
- if (info.pointee_type.zigTypeTag() == .Float) {
+ if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
@@ -9748,7 +9783,7 @@ pub const FuncGen = struct {
ptr_ty: Type,
elem: *llvm.Value,
ordering: llvm.AtomicOrdering,
- ) void {
+ ) !void {
const info = ptr_ty.ptrInfo().data;
const elem_ty = info.pointee_type;
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
@@ -9757,10 +9792,29 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
const ptr_alignment = ptr_ty.ptrAlignment(target);
const ptr_volatile = llvm.Bool.fromBool(info.@"volatile");
+
+ assert(info.vector_index != .runtime);
+ if (info.vector_index != .none) {
+ const index_u32 = self.context.intType(32).constInt(@enumToInt(info.vector_index), .False);
+ const vec_elem_ty = try self.dg.lowerType(elem_ty);
+ const vec_ty = vec_elem_ty.vectorType(info.host_size);
+
+ const loaded_vector = self.builder.buildLoad(vec_ty, ptr, "");
+ loaded_vector.setAlignment(ptr_alignment);
+ loaded_vector.setVolatile(ptr_volatile);
+
+ const modified_vector = self.builder.buildInsertElement(loaded_vector, elem, index_u32, "");
+
+ const store_inst = self.builder.buildStore(modified_vector, ptr);
+ assert(ordering == .NotAtomic);
+ store_inst.setAlignment(ptr_alignment);
+ store_inst.setVolatile(ptr_volatile);
+ return;
+ }
+
if (info.host_size != 0) {
const int_elem_ty = self.context.intType(info.host_size * 8);
- const int_ptr = self.builder.buildBitCast(ptr, int_elem_ty.pointerType(0), "");
- const containing_int = self.builder.buildLoad(int_elem_ty, int_ptr, "");
+ const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
assert(ordering == .NotAtomic);
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
@@ -9785,7 +9839,7 @@ pub const FuncGen = struct {
const shifted_value = self.builder.buildShl(extended_value, shift_amt, "");
const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, "");
- const store_inst = self.builder.buildStore(ored_value, int_ptr);
+ const store_inst = self.builder.buildStore(ored_value, ptr);
assert(ordering == .NotAtomic);
store_inst.setAlignment(ptr_alignment);
store_inst.setVolatile(ptr_volatile);
@@ -9799,12 +9853,11 @@ pub const FuncGen = struct {
return;
}
assert(ordering == .NotAtomic);
- const llvm_ptr_u8 = self.context.intType(8).pointerType(0);
const size_bytes = elem_ty.abiSize(target);
_ = self.builder.buildMemCpy(
- self.builder.buildBitCast(ptr, llvm_ptr_u8, ""),
+ ptr,
ptr_alignment,
- self.builder.buildBitCast(elem, llvm_ptr_u8, ""),
+ elem,
elem_ty.abiAlignment(target),
self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False),
info.@"volatile",
@@ -9859,7 +9912,7 @@ pub const FuncGen = struct {
constraints: [:0]const u8,
} = switch (target.cpu.arch) {
.x86 => .{
- .template =
+ .template =
\\roll $$3, %edi ; roll $$13, %edi
\\roll $$61, %edi ; roll $$51, %edi
\\xchgl %ebx,%ebx
@@ -9867,7 +9920,7 @@ pub const FuncGen = struct {
.constraints = "={edx},{eax},0,~{cc},~{memory}",
},
.x86_64 => .{
- .template =
+ .template =
\\rolq $$3, %rdi ; rolq $$13, %rdi
\\rolq $$61, %rdi ; rolq $$51, %rdi
\\xchgq %rbx,%rbx
@@ -9875,7 +9928,7 @@ pub const FuncGen = struct {
.constraints = "={rdx},{rax},0,~{cc},~{memory}",
},
.aarch64, .aarch64_32, .aarch64_be => .{
- .template =
+ .template =
\\ror x12, x12, #3 ; ror x12, x12, #13
\\ror x12, x12, #51 ; ror x12, x12, #61
\\orr x10, x10, x10
@@ -10994,6 +11047,7 @@ fn compilerRtIntBits(bits: u16) u16 {
}
fn buildAllocaInner(
+ context: *llvm.Context,
builder: *llvm.Builder,
llvm_func: *llvm.Value,
di_scope_non_null: bool,
@@ -11031,7 +11085,7 @@ fn buildAllocaInner(
// The pointer returned from this function should have the generic address space,
// if this isn't the case then cast it to the generic address space.
if (address_space != llvm.address_space.default) {
- return builder.buildAddrSpaceCast(alloca, llvm_ty.pointerType(llvm.address_space.default), "");
+ return builder.buildAddrSpaceCast(alloca, context.pointerType(llvm.address_space.default), "");
}
return alloca;
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 90d0f51c7b..c78c951eee 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -85,6 +85,9 @@ pub const Context = opaque {
pub const createBuilder = LLVMCreateBuilderInContext;
extern fn LLVMCreateBuilderInContext(C: *Context) *Builder;
+
+ pub const setOptBisectLimit = ZigLLVMSetOptBisectLimit;
+ extern fn ZigLLVMSetOptBisectLimit(C: *Context, limit: c_int) void;
};
pub const Value = opaque {
@@ -287,9 +290,6 @@ pub const Type = opaque {
pub const getUndef = LLVMGetUndef;
extern fn LLVMGetUndef(Ty: *Type) *Value;
- pub const pointerType = LLVMPointerType;
- extern fn LLVMPointerType(ElementType: *Type, AddressSpace: c_uint) *Type;
-
pub const arrayType = LLVMArrayType;
extern fn LLVMArrayType(ElementType: *Type, ElementCount: c_uint) *Type;
@@ -965,6 +965,9 @@ pub const Builder = opaque {
pub const buildAllocaInAddressSpace = ZigLLVMBuildAllocaInAddressSpace;
extern fn ZigLLVMBuildAllocaInAddressSpace(B: *Builder, Ty: *Type, AddressSpace: c_uint, Name: [*:0]const u8) *Value;
+
+ pub const buildVAArg = LLVMBuildVAArg;
+ extern fn LLVMBuildVAArg(*Builder, List: *Value, Ty: *Type, Name: [*:0]const u8) *Value;
};
pub const MDString = opaque {
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index 768525028e..83f594dcef 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -116,7 +116,7 @@ fn writeOperands(section: *Section, comptime Operands: type, operands: Operands)
};
inline for (fields) |field| {
- section.writeOperand(field.field_type, @field(operands, field.name));
+ section.writeOperand(field.type, @field(operands, field.name));
}
}
@@ -196,7 +196,7 @@ fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDe
fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
var mask: Word = 0;
inline for (@typeInfo(Operand).Struct.fields) |field, bit| {
- switch (@typeInfo(field.field_type)) {
+ switch (@typeInfo(field.type)) {
.Optional => if (@field(operand, field.name) != null) {
mask |= 1 << @intCast(u5, bit);
},
@@ -214,7 +214,7 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand
section.writeWord(mask);
inline for (@typeInfo(Operand).Struct.fields) |field| {
- switch (@typeInfo(field.field_type)) {
+ switch (@typeInfo(field.type)) {
.Optional => |info| if (@field(operand, field.name)) |child| {
section.writeOperands(info.child, child);
},
@@ -230,7 +230,7 @@ fn writeExtendedUnion(section: *Section, comptime Operand: type, operand: Operan
inline for (@typeInfo(Operand).Union.fields) |field| {
if (@field(Operand, field.name) == tag) {
- section.writeOperands(field.field_type, @field(operand, field.name));
+ section.writeOperands(field.type, @field(operand, field.name));
return;
}
}
@@ -250,7 +250,7 @@ fn operandsSize(comptime Operands: type, operands: Operands) usize {
var total: usize = 0;
inline for (fields) |field| {
- total += operandSize(field.field_type, @field(operands, field.name));
+ total += operandSize(field.type, @field(operands, field.name));
}
return total;
@@ -304,7 +304,7 @@ fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
var total: usize = 0;
var any_set = false;
inline for (@typeInfo(Operand).Struct.fields) |field| {
- switch (@typeInfo(field.field_type)) {
+ switch (@typeInfo(field.type)) {
.Optional => |info| if (@field(operand, field.name)) |child| {
total += operandsSize(info.child, child);
any_set = true;
@@ -326,7 +326,7 @@ fn extendedUnionSize(comptime Operand: type, operand: Operand) usize {
inline for (@typeInfo(Operand).Union.fields) |field| {
if (@field(Operand, field.name) == tag) {
// Add one for the tag itself.
- return 1 + operandsSize(field.field_type, @field(operand, field.name));
+ return 1 + operandsSize(field.type, @field(operand, field.name));
}
}
unreachable;
diff --git a/src/empty.zig b/src/empty.zig
deleted file mode 100644
index e69de29bb2..0000000000
--- a/src/empty.zig
+++ /dev/null
diff --git a/src/link.zig b/src/link.zig
index 450763ac18..0a526db0de 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -128,6 +128,7 @@ pub const Options = struct {
compress_debug_sections: CompressDebugSections,
bind_global_refs_locally: bool,
import_memory: bool,
+ import_symbols: bool,
import_table: bool,
export_table: bool,
initial_memory: ?u64,
@@ -168,6 +169,7 @@ pub const Options = struct {
print_gc_sections: bool,
print_icf_sections: bool,
print_map: bool,
+ opt_bisect_limit: i32,
objects: []Compilation.LinkObject,
framework_dirs: []const []const u8,
@@ -223,7 +225,9 @@ pub const Options = struct {
pub fn move(self: *Options) Options {
const copied_state = self.*;
+ self.frameworks = .{};
self.system_libs = .{};
+ self.force_undefined_symbols = .{};
return copied_state;
}
};
@@ -624,7 +628,9 @@ pub const File = struct {
base.releaseLock();
if (base.file) |f| f.close();
if (base.intermediary_basename) |sub_path| base.allocator.free(sub_path);
+ base.options.frameworks.deinit(base.allocator);
base.options.system_libs.deinit(base.allocator);
+ base.options.force_undefined_symbols.deinit(base.allocator);
switch (base.tag) {
.coff => {
if (build_options.only_c) unreachable;
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index aa94704c54..9c05114a1f 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -289,6 +289,7 @@ pub fn deinit(self: *Coff) void {
self.unresolved.deinit(gpa);
self.locals_free_list.deinit(gpa);
+ self.globals_free_list.deinit(gpa);
self.strtab.deinit(gpa);
self.got_entries.deinit(gpa);
self.got_entries_free_list.deinit(gpa);
@@ -1150,8 +1151,10 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
}
fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
- _ = self.relocs.remove(atom);
- _ = self.base_relocs.remove(atom);
+ var removed_relocs = self.relocs.fetchRemove(atom);
+ if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
+ var removed_base_relocs = self.base_relocs.fetchRemove(atom);
+ if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
}
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
@@ -1489,9 +1492,8 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
gop.value_ptr.* = sym_loc;
const gpa = self.base.allocator;
- const sym_name = try gpa.dupe(u8, name);
const sym = self.getSymbolPtr(sym_loc);
- try self.setSymbolName(sym, sym_name);
+ try self.setSymbolName(sym, name);
sym.storage_class = .EXTERNAL;
try self.unresolved.putNoClobber(gpa, global_index, true);
@@ -1860,9 +1862,7 @@ fn writeHeader(self: *Coff) !void {
}
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
- // TODO https://github.com/ziglang/zig/issues/1284
- return math.add(@TypeOf(actual_size), actual_size, actual_size / ideal_factor) catch
- math.maxInt(@TypeOf(actual_size));
+ return actual_size +| (actual_size / ideal_factor);
}
fn detectAllocCollision(self: *Coff, start: u32, size: u32) ?u32 {
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 6cc3a7d68f..1b65bbb04b 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -2445,9 +2445,7 @@ fn makeString(self: *Dwarf, bytes: []const u8) !u32 {
}
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
- // TODO https://github.com/ziglang/zig/issues/1284
- return std.math.add(@TypeOf(actual_size), actual_size, actual_size / ideal_factor) catch
- std.math.maxInt(@TypeOf(actual_size));
+ return actual_size +| (actual_size / ideal_factor);
}
pub fn flushModule(self: *Dwarf, module: *Module) !void {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index ebb1cbdfb8..4910f4b599 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1753,11 +1753,6 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
try argv.append(ssp.full_object_path);
}
- // compiler-rt
- if (compiler_rt_path) |p| {
- try argv.append(p);
- }
-
// Shared libraries.
if (is_exe_or_dyn_lib) {
const system_libs = self.base.options.system_libs.keys();
@@ -1836,6 +1831,13 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
}
}
+ // compiler-rt. Since compiler_rt exports symbols like `memset`, it needs
+ // to be after the shared libraries, so they are picked up from the shared
+ // libraries, not libcompiler_rt.
+ if (compiler_rt_path) |p| {
+ try argv.append(p);
+ }
+
// crt postlude
if (csu.crtend) |v| try argv.append(v);
if (csu.crtn) |v| try argv.append(v);
@@ -3030,9 +3032,7 @@ fn getLDMOption(target: std.Target) ?[]const u8 {
}
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
- // TODO https://github.com/ziglang/zig/issues/1284
- return std.math.add(@TypeOf(actual_size), actual_size, actual_size / ideal_factor) catch
- std.math.maxInt(@TypeOf(actual_size));
+ return actual_size +| (actual_size / ideal_factor);
}
// Provide a blueprint of csu (c-runtime startup) objects for supported
@@ -3058,27 +3058,22 @@ const CsuObjects = struct {
var result: CsuObjects = .{};
- // TODO: https://github.com/ziglang/zig/issues/4629
- // - use inline enum type
- // - reduce to enum-literals for values
- const Mode = enum {
+ // Flatten crt cases.
+ const mode: enum {
dynamic_lib,
dynamic_exe,
dynamic_pie,
static_exe,
static_pie,
- };
-
- // Flatten crt case types.
- const mode: Mode = switch (link_options.output_mode) {
+ } = switch (link_options.output_mode) {
.Obj => return CsuObjects{},
.Lib => switch (link_options.link_mode) {
- .Dynamic => Mode.dynamic_lib,
+ .Dynamic => .dynamic_lib,
.Static => return CsuObjects{},
},
.Exe => switch (link_options.link_mode) {
- .Dynamic => if (link_options.pie) Mode.dynamic_pie else Mode.dynamic_exe,
- .Static => if (link_options.pie) Mode.static_pie else Mode.static_exe,
+ .Dynamic => if (link_options.pie) .dynamic_pie else .dynamic_exe,
+ .Static => if (link_options.pie) .static_pie else .static_exe,
},
};
@@ -3108,7 +3103,6 @@ const CsuObjects = struct {
// hosted-glibc provides crtbegin/end objects in platform/compiler-specific dirs
// and they are not known at comptime. For now null-out crtbegin/end objects;
// there is no feature loss, zig has never linked those objects in before.
- // TODO: probe for paths, ie. `cc -print-file-name`
result.crtbegin = null;
result.crtend = null;
} else {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 4a1ca9a357..be770574b8 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -20,6 +20,7 @@ const dead_strip = @import("MachO/dead_strip.zig");
const fat = @import("MachO/fat.zig");
const link = @import("../link.zig");
const llvm_backend = @import("../codegen/llvm.zig");
+const load_commands = @import("MachO/load_commands.zig");
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const zld = @import("MachO/zld.zig");
@@ -38,6 +39,7 @@ const Object = @import("MachO/Object.zig");
const LibStub = @import("tapi.zig").LibStub;
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
+const Md5 = std.crypto.hash.Md5;
const Module = @import("../Module.zig");
const Relocation = @import("MachO/Relocation.zig");
const StringTable = @import("strtab.zig").StringTable;
@@ -98,10 +100,11 @@ page_size: u16,
/// fashion (default for LLVM backend).
mode: enum { incremental, one_shot },
-uuid: macho.uuid_command = .{
- .cmdsize = @sizeOf(macho.uuid_command),
- .uuid = undefined,
-},
+dyld_info_cmd: macho.dyld_info_command = .{},
+symtab_cmd: macho.symtab_command = .{},
+dysymtab_cmd: macho.dysymtab_command = .{},
+uuid_cmd: macho.uuid_command = .{},
+codesig_cmd: macho.linkedit_data_command = .{ .cmd = .CODE_SIGNATURE },
dylibs: std.ArrayListUnmanaged(Dylib) = .{},
dylibs_map: std.StringHashMapUnmanaged(u16) = .{},
@@ -265,9 +268,6 @@ pub const SymbolWithLoc = struct {
/// actual_capacity + (actual_capacity / ideal_factor)
const ideal_factor = 3;
-/// Default path to dyld
-pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld";
-
/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
@@ -556,40 +556,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
self.logAtoms();
}
- var lc_buffer = std.ArrayList(u8).init(arena);
- const lc_writer = lc_buffer.writer();
- var ncmds: u32 = 0;
-
- try self.writeLinkeditSegmentData(&ncmds, lc_writer);
- try writeDylinkerLC(&ncmds, lc_writer);
-
- self.writeMainLC(&ncmds, lc_writer) catch |err| switch (err) {
- error.MissingMainEntrypoint => {
- self.error_flags.no_entry_point_found = true;
- },
- else => |e| return e,
- };
-
- try self.writeDylibIdLC(&ncmds, lc_writer);
- try self.writeRpathLCs(&ncmds, lc_writer);
-
- {
- try lc_writer.writeStruct(macho.source_version_command{
- .cmdsize = @sizeOf(macho.source_version_command),
- .version = 0x0,
- });
- ncmds += 1;
- }
-
- try self.writeBuildVersionLC(&ncmds, lc_writer);
-
- {
- std.crypto.random.bytes(&self.uuid.uuid);
- try lc_writer.writeStruct(self.uuid);
- ncmds += 1;
- }
-
- try self.writeLoadDylibLCs(&ncmds, lc_writer);
+ try self.writeLinkeditSegmentData();
const target = self.base.options.target;
const requires_codesig = blk: {
@@ -598,7 +565,6 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
break :blk true;
break :blk false;
};
- var codesig_offset: ?u32 = null;
var codesig: ?CodeSignature = if (requires_codesig) blk: {
// Preallocate space for the code signature.
// We need to do this at this stage so that we have the load commands with proper values
@@ -608,22 +574,72 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
var codesig = CodeSignature.init(self.page_size);
codesig.code_directory.ident = self.base.options.emit.?.sub_path;
if (self.base.options.entitlements) |path| {
- try codesig.addEntitlements(arena, path);
+ try codesig.addEntitlements(self.base.allocator, path);
}
- codesig_offset = try self.writeCodeSignaturePadding(&codesig, &ncmds, lc_writer);
+ try self.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
+ defer if (codesig) |*csig| csig.deinit(self.base.allocator);
- var headers_buf = std.ArrayList(u8).init(arena);
- try self.writeSegmentHeaders(&ncmds, headers_buf.writer());
+ // Write load commands
+ var lc_buffer = std.ArrayList(u8).init(arena);
+ const lc_writer = lc_buffer.writer();
- try self.base.file.?.pwriteAll(headers_buf.items, @sizeOf(macho.mach_header_64));
- try self.base.file.?.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64) + headers_buf.items.len);
+ try self.writeSegmentHeaders(lc_writer);
+ try lc_writer.writeStruct(self.dyld_info_cmd);
+ try lc_writer.writeStruct(self.symtab_cmd);
+ try lc_writer.writeStruct(self.dysymtab_cmd);
+ try load_commands.writeDylinkerLC(lc_writer);
- try self.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len + headers_buf.items.len));
+ switch (self.base.options.output_mode) {
+ .Exe => blk: {
+ const seg_id = self.header_segment_cmd_index.?;
+ const seg = self.segments.items[seg_id];
+ const global = self.getEntryPoint() catch |err| switch (err) {
+ error.MissingMainEntrypoint => {
+ self.error_flags.no_entry_point_found = true;
+ break :blk;
+ },
+ else => |e| return e,
+ };
+ const sym = self.getSymbol(global);
+ try lc_writer.writeStruct(macho.entry_point_command{
+ .entryoff = @intCast(u32, sym.n_value - seg.vmaddr),
+ .stacksize = self.base.options.stack_size_override orelse 0,
+ });
+ },
+ .Lib => if (self.base.options.link_mode == .Dynamic) {
+ try load_commands.writeDylibIdLC(self.base.allocator, &self.base.options, lc_writer);
+ },
+ else => {},
+ }
+
+ try load_commands.writeRpathLCs(self.base.allocator, &self.base.options, lc_writer);
+ try lc_writer.writeStruct(macho.source_version_command{
+ .version = 0,
+ });
+ try load_commands.writeBuildVersionLC(&self.base.options, lc_writer);
+
+ if (self.cold_start) {
+ std.crypto.random.bytes(&self.uuid_cmd.uuid);
+ Md5.hash(&self.uuid_cmd.uuid, &self.uuid_cmd.uuid, .{});
+ conformUuid(&self.uuid_cmd.uuid);
+ }
+ try lc_writer.writeStruct(self.uuid_cmd);
+
+ try load_commands.writeLoadDylibLCs(self.dylibs.items, self.referenced_dylibs.keys(), lc_writer);
+
+ if (requires_codesig) {
+ try lc_writer.writeStruct(self.codesig_cmd);
+ }
+
+ try self.base.file.?.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
+
+ const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
+ try self.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len));
if (codesig) |*csig| {
- try self.writeCodeSignature(comp, csig, codesig_offset.?); // code signing always comes last
+ try self.writeCodeSignature(comp, csig); // code signing always comes last
}
if (self.d_sym) |*d_sym| {
@@ -653,6 +669,11 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
self.cold_start = false;
}
+inline fn conformUuid(out: *[Md5.digest_length]u8) void {
+ // LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
+ out[6] = (out[6] & 0x0F) | (3 << 4);
+ out[8] = (out[8] & 0x3F) | 0x80;
+}
pub fn resolveLibSystem(
arena: Allocator,
@@ -1702,195 +1723,6 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
try self.writePtrWidthAtom(got_atom);
}
-pub fn writeDylinkerLC(ncmds: *u32, lc_writer: anytype) !void {
- const name_len = mem.sliceTo(default_dyld_path, 0).len;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
- u64,
- @sizeOf(macho.dylinker_command) + name_len,
- @sizeOf(u64),
- ));
- try lc_writer.writeStruct(macho.dylinker_command{
- .cmd = .LOAD_DYLINKER,
- .cmdsize = cmdsize,
- .name = @sizeOf(macho.dylinker_command),
- });
- try lc_writer.writeAll(mem.sliceTo(default_dyld_path, 0));
- const padding = cmdsize - @sizeOf(macho.dylinker_command) - name_len;
- if (padding > 0) {
- try lc_writer.writeByteNTimes(0, padding);
- }
- ncmds.* += 1;
-}
-
-pub fn writeMainLC(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
- if (self.base.options.output_mode != .Exe) return;
- const seg_id = self.header_segment_cmd_index.?;
- const seg = self.segments.items[seg_id];
- const global = try self.getEntryPoint();
- const sym = self.getSymbol(global);
- try lc_writer.writeStruct(macho.entry_point_command{
- .cmd = .MAIN,
- .cmdsize = @sizeOf(macho.entry_point_command),
- .entryoff = @intCast(u32, sym.n_value - seg.vmaddr),
- .stacksize = self.base.options.stack_size_override orelse 0,
- });
- ncmds.* += 1;
-}
-
-const WriteDylibLCCtx = struct {
- cmd: macho.LC,
- name: []const u8,
- timestamp: u32 = 2,
- current_version: u32 = 0x10000,
- compatibility_version: u32 = 0x10000,
-};
-
-pub fn writeDylibLC(ctx: WriteDylibLCCtx, ncmds: *u32, lc_writer: anytype) !void {
- const name_len = ctx.name.len + 1;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
- u64,
- @sizeOf(macho.dylib_command) + name_len,
- @sizeOf(u64),
- ));
- try lc_writer.writeStruct(macho.dylib_command{
- .cmd = ctx.cmd,
- .cmdsize = cmdsize,
- .dylib = .{
- .name = @sizeOf(macho.dylib_command),
- .timestamp = ctx.timestamp,
- .current_version = ctx.current_version,
- .compatibility_version = ctx.compatibility_version,
- },
- });
- try lc_writer.writeAll(ctx.name);
- try lc_writer.writeByte(0);
- const padding = cmdsize - @sizeOf(macho.dylib_command) - name_len;
- if (padding > 0) {
- try lc_writer.writeByteNTimes(0, padding);
- }
- ncmds.* += 1;
-}
-
-pub fn writeDylibIdLC(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
- if (self.base.options.output_mode != .Lib) return;
- const install_name = self.base.options.install_name orelse self.base.options.emit.?.sub_path;
- const curr = self.base.options.version orelse std.builtin.Version{
- .major = 1,
- .minor = 0,
- .patch = 0,
- };
- const compat = self.base.options.compatibility_version orelse std.builtin.Version{
- .major = 1,
- .minor = 0,
- .patch = 0,
- };
- try writeDylibLC(.{
- .cmd = .ID_DYLIB,
- .name = install_name,
- .current_version = curr.major << 16 | curr.minor << 8 | curr.patch,
- .compatibility_version = compat.major << 16 | compat.minor << 8 | compat.patch,
- }, ncmds, lc_writer);
-}
-
-const RpathIterator = struct {
- buffer: []const []const u8,
- table: std.StringHashMap(void),
- count: usize = 0,
-
- fn init(gpa: Allocator, rpaths: []const []const u8) RpathIterator {
- return .{ .buffer = rpaths, .table = std.StringHashMap(void).init(gpa) };
- }
-
- fn deinit(it: *RpathIterator) void {
- it.table.deinit();
- }
-
- fn next(it: *RpathIterator) !?[]const u8 {
- while (true) {
- if (it.count >= it.buffer.len) return null;
- const rpath = it.buffer[it.count];
- it.count += 1;
- const gop = try it.table.getOrPut(rpath);
- if (gop.found_existing) continue;
- return rpath;
- }
- }
-};
-
-pub fn writeRpathLCs(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
- const gpa = self.base.allocator;
-
- var it = RpathIterator.init(gpa, self.base.options.rpath_list);
- defer it.deinit();
-
- while (try it.next()) |rpath| {
- const rpath_len = rpath.len + 1;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
- u64,
- @sizeOf(macho.rpath_command) + rpath_len,
- @sizeOf(u64),
- ));
- try lc_writer.writeStruct(macho.rpath_command{
- .cmdsize = cmdsize,
- .path = @sizeOf(macho.rpath_command),
- });
- try lc_writer.writeAll(rpath);
- try lc_writer.writeByte(0);
- const padding = cmdsize - @sizeOf(macho.rpath_command) - rpath_len;
- if (padding > 0) {
- try lc_writer.writeByteNTimes(0, padding);
- }
- ncmds.* += 1;
- }
-}
-
-pub fn writeBuildVersionLC(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
- const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
- const platform_version = blk: {
- const ver = self.base.options.target.os.version_range.semver.min;
- const platform_version = ver.major << 16 | ver.minor << 8;
- break :blk platform_version;
- };
- const sdk_version = if (self.base.options.native_darwin_sdk) |sdk| blk: {
- const ver = sdk.version;
- const sdk_version = ver.major << 16 | ver.minor << 8;
- break :blk sdk_version;
- } else platform_version;
- const is_simulator_abi = self.base.options.target.abi == .simulator;
- try lc_writer.writeStruct(macho.build_version_command{
- .cmdsize = cmdsize,
- .platform = switch (self.base.options.target.os.tag) {
- .macos => .MACOS,
- .ios => if (is_simulator_abi) macho.PLATFORM.IOSSIMULATOR else macho.PLATFORM.IOS,
- .watchos => if (is_simulator_abi) macho.PLATFORM.WATCHOSSIMULATOR else macho.PLATFORM.WATCHOS,
- .tvos => if (is_simulator_abi) macho.PLATFORM.TVOSSIMULATOR else macho.PLATFORM.TVOS,
- else => unreachable,
- },
- .minos = platform_version,
- .sdk = sdk_version,
- .ntools = 1,
- });
- try lc_writer.writeAll(mem.asBytes(&macho.build_tool_version{
- .tool = .LD,
- .version = 0x0,
- }));
- ncmds.* += 1;
-}
-
-pub fn writeLoadDylibLCs(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
- for (self.referenced_dylibs.keys()) |id| {
- const dylib = self.dylibs.items[id];
- const dylib_id = dylib.id orelse unreachable;
- try writeDylibLC(.{
- .cmd = if (dylib.weak) .LOAD_WEAK_DYLIB else .LOAD_DYLIB,
- .name = dylib_id.name,
- .timestamp = dylib_id.timestamp,
- .current_version = dylib_id.current_version,
- .compatibility_version = dylib_id.compatibility_version,
- }, ncmds, lc_writer);
- }
-}
-
pub fn deinit(self: *MachO) void {
const gpa = self.base.allocator;
@@ -2744,10 +2576,14 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
}
fn freeRelocationsForAtom(self: *MachO, atom: *Atom) void {
- _ = self.relocs.remove(atom);
- _ = self.rebases.remove(atom);
- _ = self.bindings.remove(atom);
- _ = self.lazy_bindings.remove(atom);
+ var removed_relocs = self.relocs.fetchRemove(atom);
+ if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
+ var removed_rebases = self.rebases.fetchRemove(atom);
+ if (removed_rebases) |*rebases| rebases.value.deinit(self.base.allocator);
+ var removed_bindings = self.bindings.fetchRemove(atom);
+ if (removed_bindings) |*bindings| bindings.value.deinit(self.base.allocator);
+ var removed_lazy_bindings = self.lazy_bindings.fetchRemove(atom);
+ if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(self.base.allocator);
}
fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
@@ -2976,98 +2812,7 @@ pub fn populateMissingMetadata(self: *MachO) !void {
}
}
-pub inline fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 {
- const darwin_path_max = 1024;
- const name_len = if (assume_max_path_len) darwin_path_max else std.mem.len(name) + 1;
- return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64));
-}
-
-fn calcLCsSize(self: *MachO, assume_max_path_len: bool) !u32 {
- const gpa = self.base.allocator;
- var sizeofcmds: u64 = 0;
- for (self.segments.items) |seg| {
- sizeofcmds += seg.nsects * @sizeOf(macho.section_64) + @sizeOf(macho.segment_command_64);
- }
-
- // LC_DYLD_INFO_ONLY
- sizeofcmds += @sizeOf(macho.dyld_info_command);
- // LC_FUNCTION_STARTS
- if (self.text_section_index != null) {
- sizeofcmds += @sizeOf(macho.linkedit_data_command);
- }
- // LC_DATA_IN_CODE
- sizeofcmds += @sizeOf(macho.linkedit_data_command);
- // LC_SYMTAB
- sizeofcmds += @sizeOf(macho.symtab_command);
- // LC_DYSYMTAB
- sizeofcmds += @sizeOf(macho.dysymtab_command);
- // LC_LOAD_DYLINKER
- sizeofcmds += calcInstallNameLen(
- @sizeOf(macho.dylinker_command),
- mem.sliceTo(default_dyld_path, 0),
- false,
- );
- // LC_MAIN
- if (self.base.options.output_mode == .Exe) {
- sizeofcmds += @sizeOf(macho.entry_point_command);
- }
- // LC_ID_DYLIB
- if (self.base.options.output_mode == .Lib) {
- sizeofcmds += blk: {
- const install_name = self.base.options.install_name orelse self.base.options.emit.?.sub_path;
- break :blk calcInstallNameLen(
- @sizeOf(macho.dylib_command),
- install_name,
- assume_max_path_len,
- );
- };
- }
- // LC_RPATH
- {
- var it = RpathIterator.init(gpa, self.base.options.rpath_list);
- defer it.deinit();
- while (try it.next()) |rpath| {
- sizeofcmds += calcInstallNameLen(
- @sizeOf(macho.rpath_command),
- rpath,
- assume_max_path_len,
- );
- }
- }
- // LC_SOURCE_VERSION
- sizeofcmds += @sizeOf(macho.source_version_command);
- // LC_BUILD_VERSION
- sizeofcmds += @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
- // LC_UUID
- sizeofcmds += @sizeOf(macho.uuid_command);
- // LC_LOAD_DYLIB
- for (self.referenced_dylibs.keys()) |id| {
- const dylib = self.dylibs.items[id];
- const dylib_id = dylib.id orelse unreachable;
- sizeofcmds += calcInstallNameLen(
- @sizeOf(macho.dylib_command),
- dylib_id.name,
- assume_max_path_len,
- );
- }
- // LC_CODE_SIGNATURE
- {
- const target = self.base.options.target;
- const requires_codesig = blk: {
- if (self.base.options.entitlements) |_| break :blk true;
- if (target.cpu.arch == .aarch64 and (target.os.tag == .macos or target.abi == .simulator))
- break :blk true;
- break :blk false;
- };
- if (requires_codesig) {
- sizeofcmds += @sizeOf(macho.linkedit_data_command);
- }
- }
-
- return @intCast(u32, sizeofcmds);
-}
-
-pub fn calcPagezeroSize(self: *MachO) u64 {
+fn calcPagezeroSize(self: *MachO) u64 {
const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize;
const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size);
if (self.base.options.output_mode == .Lib) return 0;
@@ -3079,23 +2824,6 @@ pub fn calcPagezeroSize(self: *MachO) u64 {
return aligned_pagezero_vmsize;
}
-pub fn calcMinHeaderPad(self: *MachO) !u64 {
- var padding: u32 = (try self.calcLCsSize(false)) + (self.base.options.headerpad_size orelse 0);
- log.debug("minimum requested headerpad size 0x{x}", .{padding + @sizeOf(macho.mach_header_64)});
-
- if (self.base.options.headerpad_max_install_names) {
- var min_headerpad_size: u32 = try self.calcLCsSize(true);
- log.debug("headerpad_max_install_names minimum headerpad size 0x{x}", .{
- min_headerpad_size + @sizeOf(macho.mach_header_64),
- });
- padding = @max(padding, min_headerpad_size);
- }
- const offset = @sizeOf(macho.mach_header_64) + padding;
- log.debug("actual headerpad size 0x{x}", .{offset});
-
- return offset;
-}
-
fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts: struct {
size: u64 = 0,
alignment: u32 = 0,
@@ -3433,18 +3161,17 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 {
return global_index;
}
-fn writeSegmentHeaders(self: *MachO, ncmds: *u32, writer: anytype) !void {
+fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
for (self.segments.items) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
try writer.writeStruct(seg);
for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
try writer.writeStruct(header);
}
- ncmds.* += 1;
}
}
-fn writeLinkeditSegmentData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
+fn writeLinkeditSegmentData(self: *MachO) !void {
const seg = self.getLinkeditSegmentPtr();
seg.filesize = 0;
seg.vmsize = 0;
@@ -3459,8 +3186,8 @@ fn writeLinkeditSegmentData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void
}
}
- try self.writeDyldInfoData(ncmds, lc_writer);
- try self.writeSymtabs(ncmds, lc_writer);
+ try self.writeDyldInfoData();
+ try self.writeSymtabs();
seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
}
@@ -3612,7 +3339,7 @@ fn collectExportData(self: *MachO, trie: *Trie) !void {
try trie.finalize(gpa);
}
-fn writeDyldInfoData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
+fn writeDyldInfoData(self: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -3635,27 +3362,36 @@ fn writeDyldInfoData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
try self.collectExportData(&trie);
const link_seg = self.getLinkeditSegmentPtr();
- const rebase_off = mem.alignForwardGeneric(u64, link_seg.fileoff, @alignOf(u64));
- assert(rebase_off == link_seg.fileoff);
+ assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
+ const rebase_off = link_seg.fileoff;
const rebase_size = try bind.rebaseInfoSize(rebase_pointers.items);
- log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size });
+ const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64));
+ log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
- const bind_off = mem.alignForwardGeneric(u64, rebase_off + rebase_size, @alignOf(u64));
+ const bind_off = rebase_off + rebase_size_aligned;
const bind_size = try bind.bindInfoSize(bind_pointers.items);
- log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size });
+ const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64));
+ log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
- const lazy_bind_off = mem.alignForwardGeneric(u64, bind_off + bind_size, @alignOf(u64));
+ const lazy_bind_off = bind_off + bind_size_aligned;
const lazy_bind_size = try bind.lazyBindInfoSize(lazy_bind_pointers.items);
- log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size });
+ const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64));
+ log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
+ lazy_bind_off,
+ lazy_bind_off + lazy_bind_size_aligned,
+ });
- const export_off = mem.alignForwardGeneric(u64, lazy_bind_off + lazy_bind_size, @alignOf(u64));
+ const export_off = lazy_bind_off + lazy_bind_size_aligned;
const export_size = trie.size;
- log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size });
+ const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64));
+ log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
- const needed_size = export_off + export_size - rebase_off;
+ const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
+ return error.Overflow;
link_seg.filesize = needed_size;
+ assert(mem.isAlignedGeneric(u64, link_seg.fileoff + link_seg.filesize, @alignOf(u64)));
- var buffer = try gpa.alloc(u8, math.cast(usize, needed_size) orelse return error.Overflow);
+ var buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
mem.set(u8, buffer, 0);
@@ -3683,21 +3419,14 @@ fn writeDyldInfoData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
const end = start + (math.cast(usize, lazy_bind_size) orelse return error.Overflow);
try self.populateLazyBindOffsetsInStubHelper(buffer[start..end]);
- try lc_writer.writeStruct(macho.dyld_info_command{
- .cmd = .DYLD_INFO_ONLY,
- .cmdsize = @sizeOf(macho.dyld_info_command),
- .rebase_off = @intCast(u32, rebase_off),
- .rebase_size = @intCast(u32, rebase_size),
- .bind_off = @intCast(u32, bind_off),
- .bind_size = @intCast(u32, bind_size),
- .weak_bind_off = 0,
- .weak_bind_size = 0,
- .lazy_bind_off = @intCast(u32, lazy_bind_off),
- .lazy_bind_size = @intCast(u32, lazy_bind_size),
- .export_off = @intCast(u32, export_off),
- .export_size = @intCast(u32, export_size),
- });
- ncmds.* += 1;
+ self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
+ self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
+ self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
+ self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
+ self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
+ self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
+ self.dyld_info_cmd.export_off = @intCast(u32, export_off);
+ self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
}
fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
@@ -3799,45 +3528,14 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
}
}
-fn writeSymtabs(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
- var symtab_cmd = macho.symtab_command{
- .cmdsize = @sizeOf(macho.symtab_command),
- .symoff = 0,
- .nsyms = 0,
- .stroff = 0,
- .strsize = 0,
- };
- var dysymtab_cmd = macho.dysymtab_command{
- .cmdsize = @sizeOf(macho.dysymtab_command),
- .ilocalsym = 0,
- .nlocalsym = 0,
- .iextdefsym = 0,
- .nextdefsym = 0,
- .iundefsym = 0,
- .nundefsym = 0,
- .tocoff = 0,
- .ntoc = 0,
- .modtaboff = 0,
- .nmodtab = 0,
- .extrefsymoff = 0,
- .nextrefsyms = 0,
- .indirectsymoff = 0,
- .nindirectsyms = 0,
- .extreloff = 0,
- .nextrel = 0,
- .locreloff = 0,
- .nlocrel = 0,
- };
- var ctx = try self.writeSymtab(&symtab_cmd);
+fn writeSymtabs(self: *MachO) !void {
+ var ctx = try self.writeSymtab();
defer ctx.imports_table.deinit();
- try self.writeDysymtab(ctx, &dysymtab_cmd);
- try self.writeStrtab(&symtab_cmd);
- try lc_writer.writeStruct(symtab_cmd);
- try lc_writer.writeStruct(dysymtab_cmd);
- ncmds.* += 2;
+ try self.writeDysymtab(ctx);
+ try self.writeStrtab();
}
-fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
+fn writeSymtab(self: *MachO) !SymtabCtx {
const gpa = self.base.allocator;
var locals = std.ArrayList(macho.nlist_64).init(gpa);
@@ -3884,13 +3582,11 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
const nsyms = nlocals + nexports + nimports;
const seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(
- u64,
- seg.fileoff + seg.filesize,
- @alignOf(macho.nlist_64),
- );
+ const offset = seg.fileoff + seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff;
+ assert(mem.isAlignedGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64)));
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
@@ -3902,8 +3598,8 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
try self.base.file.?.pwriteAll(buffer.items, offset);
- lc.symoff = @intCast(u32, offset);
- lc.nsyms = nsyms;
+ self.symtab_cmd.symoff = @intCast(u32, offset);
+ self.symtab_cmd.nsyms = nsyms;
return SymtabCtx{
.nlocalsym = nlocals,
@@ -3913,18 +3609,26 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
};
}
-fn writeStrtab(self: *MachO, lc: *macho.symtab_command) !void {
+fn writeStrtab(self: *MachO) !void {
+ const gpa = self.base.allocator;
const seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+ const offset = seg.fileoff + seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = self.strtab.buffer.items.len;
- seg.filesize = offset + needed_size - seg.fileoff;
+ const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ seg.filesize = offset + needed_size_aligned - seg.fileoff;
- log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+ log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
+
+ const buffer = try gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
+ defer gpa.free(buffer);
+ mem.set(u8, buffer, 0);
+ mem.copy(u8, buffer, self.strtab.buffer.items);
- try self.base.file.?.pwriteAll(self.strtab.buffer.items, offset);
+ try self.base.file.?.pwriteAll(buffer, offset);
- lc.stroff = @intCast(u32, offset);
- lc.strsize = @intCast(u32, needed_size);
+ self.symtab_cmd.stroff = @intCast(u32, offset);
+ self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
}
const SymtabCtx = struct {
@@ -3934,7 +3638,7 @@ const SymtabCtx = struct {
imports_table: std.AutoHashMap(SymbolWithLoc, u32),
};
-fn writeDysymtab(self: *MachO, ctx: SymtabCtx, lc: *macho.dysymtab_command) !void {
+fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
const gpa = self.base.allocator;
const nstubs = @intCast(u32, self.stubs_table.count());
const ngot_entries = @intCast(u32, self.got_entries_table.count());
@@ -3943,15 +3647,17 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx, lc: *macho.dysymtab_command) !voi
const iundefsym = iextdefsym + ctx.nextdefsym;
const seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+ const offset = seg.fileoff + seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nindirectsyms * @sizeOf(u32);
- seg.filesize = offset + needed_size - seg.fileoff;
+ const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ seg.filesize = offset + needed_size_aligned - seg.fileoff;
- log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+ log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
var buf = std.ArrayList(u8).init(gpa);
defer buf.deinit();
- try buf.ensureTotalCapacity(needed_size);
+ try buf.ensureTotalCapacity(math.cast(usize, needed_size_aligned) orelse return error.Overflow);
const writer = buf.writer();
if (self.stubs_section_index) |sect_id| {
@@ -3990,24 +3696,24 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx, lc: *macho.dysymtab_command) !voi
}
}
- assert(buf.items.len == needed_size);
+ const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
+ if (padding > 0) {
+ buf.appendNTimesAssumeCapacity(0, padding);
+ }
+
+ assert(buf.items.len == needed_size_aligned);
try self.base.file.?.pwriteAll(buf.items, offset);
- lc.nlocalsym = ctx.nlocalsym;
- lc.iextdefsym = iextdefsym;
- lc.nextdefsym = ctx.nextdefsym;
- lc.iundefsym = iundefsym;
- lc.nundefsym = ctx.nundefsym;
- lc.indirectsymoff = @intCast(u32, offset);
- lc.nindirectsyms = nindirectsyms;
+ self.dysymtab_cmd.nlocalsym = ctx.nlocalsym;
+ self.dysymtab_cmd.iextdefsym = iextdefsym;
+ self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
+ self.dysymtab_cmd.iundefsym = iundefsym;
+ self.dysymtab_cmd.nundefsym = ctx.nundefsym;
+ self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset);
+ self.dysymtab_cmd.nindirectsyms = nindirectsyms;
}
-fn writeCodeSignaturePadding(
- self: *MachO,
- code_sig: *CodeSignature,
- ncmds: *u32,
- lc_writer: anytype,
-) !u32 {
+fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
const seg = self.getLinkeditSegmentPtr();
// Code signature data has to be 16-bytes aligned for Apple tools to recognize the file
// https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
@@ -4020,19 +3726,13 @@ fn writeCodeSignaturePadding(
// except for code signature data.
try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
- try lc_writer.writeStruct(macho.linkedit_data_command{
- .cmd = .CODE_SIGNATURE,
- .cmdsize = @sizeOf(macho.linkedit_data_command),
- .dataoff = @intCast(u32, offset),
- .datasize = @intCast(u32, needed_size),
- });
- ncmds.* += 1;
-
- return @intCast(u32, offset);
+ self.codesig_cmd.dataoff = @intCast(u32, offset);
+ self.codesig_cmd.datasize = @intCast(u32, needed_size);
}
-fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *CodeSignature, offset: u32) !void {
+fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *CodeSignature) !void {
const seg = self.getSegment(self.text_section_index.?);
+ const offset = self.codesig_cmd.dataoff;
var buffer = std.ArrayList(u8).init(self.base.allocator);
defer buffer.deinit();
@@ -4098,9 +3798,7 @@ fn writeHeader(self: *MachO, ncmds: u32, sizeofcmds: u32) !void {
}
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
- // TODO https://github.com/ziglang/zig/issues/1284
- return std.math.add(@TypeOf(actual_size), actual_size, actual_size / ideal_factor) catch
- std.math.maxInt(@TypeOf(actual_size));
+ return actual_size +| (actual_size / ideal_factor);
}
fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 {
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index e3c362e941..8bc00d9181 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -1,6 +1,4 @@
const CodeSignature = @This();
-const Compilation = @import("../../Compilation.zig");
-const WaitGroup = @import("../../WaitGroup.zig");
const std = @import("std");
const assert = std.debug.assert;
@@ -9,10 +7,14 @@ const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
const testing = std.testing;
+
const Allocator = mem.Allocator;
+const Compilation = @import("../../Compilation.zig");
const Sha256 = std.crypto.hash.sha2.Sha256;
+const ThreadPool = @import("../../ThreadPool.zig");
+const WaitGroup = @import("../../WaitGroup.zig");
-const hash_size: u8 = 32;
+const hash_size = Sha256.digest_length;
const Blob = union(enum) {
code_directory: *CodeDirectory,
@@ -109,7 +111,7 @@ const CodeDirectory = struct {
fn size(self: CodeDirectory) u32 {
const code_slots = self.inner.nCodeSlots * hash_size;
const special_slots = self.inner.nSpecialSlots * hash_size;
- return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1) + special_slots + code_slots;
+ return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1 + special_slots + code_slots);
}
fn write(self: CodeDirectory, writer: anytype) !void {
@@ -287,33 +289,7 @@ pub fn writeAdhocSignature(
self.code_directory.inner.nCodeSlots = total_pages;
// Calculate hash for each page (in file) and write it to the buffer
- var wg: WaitGroup = .{};
- {
- const buffer = try gpa.alloc(u8, self.page_size * total_pages);
- defer gpa.free(buffer);
-
- const results = try gpa.alloc(fs.File.PReadError!usize, total_pages);
- defer gpa.free(results);
- {
- wg.reset();
- defer wg.wait();
-
- var i: usize = 0;
- while (i < total_pages) : (i += 1) {
- const fstart = i * self.page_size;
- const fsize = if (fstart + self.page_size > opts.file_size)
- opts.file_size - fstart
- else
- self.page_size;
- const out_hash = &self.code_directory.code_slots.items[i];
- wg.start();
- try comp.thread_pool.spawn(workerSha256Hash, .{
- opts.file, fstart, buffer[fstart..][0..fsize], out_hash, &results[i], &wg,
- });
- }
- }
- for (results) |result| _ = try result;
- }
+ try self.parallelHash(gpa, comp.thread_pool, opts.file, opts.file_size);
try blobs.append(.{ .code_directory = &self.code_directory });
header.length += @sizeOf(macho.BlobIndex);
@@ -352,7 +328,7 @@ pub fn writeAdhocSignature(
}
self.code_directory.inner.hashOffset =
- @sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1) + self.code_directory.inner.nSpecialSlots * hash_size;
+ @sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size);
self.code_directory.inner.length = self.code_directory.size();
header.length += self.code_directory.size();
@@ -372,17 +348,60 @@ pub fn writeAdhocSignature(
}
}
-fn workerSha256Hash(
+fn parallelHash(
+ self: *CodeSignature,
+ gpa: Allocator,
+ pool: *ThreadPool,
+ file: fs.File,
+ file_size: u32,
+) !void {
+ var wg: WaitGroup = .{};
+
+ const total_num_chunks = mem.alignForward(file_size, self.page_size) / self.page_size;
+ assert(self.code_directory.code_slots.items.len >= total_num_chunks);
+
+ const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks);
+ defer gpa.free(buffer);
+
+ const results = try gpa.alloc(fs.File.PReadError!usize, total_num_chunks);
+ defer gpa.free(results);
+
+ {
+ wg.reset();
+ defer wg.wait();
+
+ var i: usize = 0;
+ while (i < total_num_chunks) : (i += 1) {
+ const fstart = i * self.page_size;
+ const fsize = if (fstart + self.page_size > file_size)
+ file_size - fstart
+ else
+ self.page_size;
+ wg.start();
+ try pool.spawn(worker, .{
+ file,
+ fstart,
+ buffer[fstart..][0..fsize],
+ &self.code_directory.code_slots.items[i],
+ &results[i],
+ &wg,
+ });
+ }
+ }
+ for (results) |result| _ = try result;
+}
+
+fn worker(
file: fs.File,
fstart: usize,
buffer: []u8,
- hash: *[hash_size]u8,
+ out: *[hash_size]u8,
err: *fs.File.PReadError!usize,
wg: *WaitGroup,
) void {
defer wg.finish();
err.* = file.preadAll(buffer, fstart);
- Sha256.hash(buffer, hash, .{});
+ Sha256.hash(buffer, out, .{});
}
pub fn size(self: CodeSignature) u32 {
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 655ba7162f..a13ad9c9f4 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -5,6 +5,7 @@ const build_options = @import("build_options");
const assert = std.debug.assert;
const fs = std.fs;
const link = @import("../../link.zig");
+const load_commands = @import("load_commands.zig");
const log = std.log.scoped(.dsym);
const macho = std.macho;
const makeStaticString = MachO.makeStaticString;
@@ -25,6 +26,8 @@ dwarf: Dwarf,
file: fs.File,
page_size: u16,
+symtab_cmd: macho.symtab_command = .{},
+
segments: std.ArrayListUnmanaged(macho.segment_command_64) = .{},
sections: std.ArrayListUnmanaged(macho.section_64) = .{},
@@ -295,31 +298,21 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
}
}
+ self.finalizeDwarfSegment(macho_file);
+ try self.writeLinkeditSegmentData(macho_file);
+
+ // Write load commands
var lc_buffer = std.ArrayList(u8).init(self.allocator);
defer lc_buffer.deinit();
const lc_writer = lc_buffer.writer();
- var ncmds: u32 = 0;
-
- self.finalizeDwarfSegment(macho_file);
- try self.writeLinkeditSegmentData(macho_file, &ncmds, lc_writer);
-
- {
- try lc_writer.writeStruct(macho_file.uuid);
- ncmds += 1;
- }
-
- var headers_buf = std.ArrayList(u8).init(self.allocator);
- defer headers_buf.deinit();
- try self.writeSegmentHeaders(macho_file, &ncmds, headers_buf.writer());
- try self.file.pwriteAll(headers_buf.items, @sizeOf(macho.mach_header_64));
- try self.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64) + headers_buf.items.len);
+ try self.writeSegmentHeaders(macho_file, lc_writer);
+ try lc_writer.writeStruct(self.symtab_cmd);
+ try lc_writer.writeStruct(macho_file.uuid_cmd);
- try self.writeHeader(
- macho_file,
- ncmds,
- @intCast(u32, lc_buffer.items.len + headers_buf.items.len),
- );
+ const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
+ try self.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
+ try self.writeHeader(macho_file, ncmds, @intCast(u32, lc_buffer.items.len));
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
@@ -386,7 +379,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
log.debug("found __LINKEDIT segment free space at 0x{x}", .{linkedit.fileoff});
}
-fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, ncmds: *u32, writer: anytype) !void {
+fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) !void {
// Write segment/section headers from the binary file first.
const end = macho_file.linkedit_segment_cmd_index.?;
for (macho_file.segments.items[0..end]) |seg, i| {
@@ -416,8 +409,6 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, ncmds: *u32, wri
out_header.offset = 0;
try writer.writeStruct(out_header);
}
-
- ncmds.* += 1;
}
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
for (self.segments.items) |seg, i| {
@@ -426,7 +417,6 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, ncmds: *u32, wri
for (self.sections.items[indexes.start..indexes.end]) |header| {
try writer.writeStruct(header);
}
- ncmds.* += 1;
}
}
@@ -465,33 +455,19 @@ fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
return min_pos - start;
}
-fn writeLinkeditSegmentData(
- self: *DebugSymbols,
- macho_file: *MachO,
- ncmds: *u32,
- lc_writer: anytype,
-) !void {
+fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- var symtab_cmd = macho.symtab_command{
- .cmdsize = @sizeOf(macho.symtab_command),
- .symoff = 0,
- .nsyms = 0,
- .stroff = 0,
- .strsize = 0,
- };
- try self.writeSymtab(macho_file, &symtab_cmd);
- try self.writeStrtab(&symtab_cmd);
- try lc_writer.writeStruct(symtab_cmd);
- ncmds.* += 1;
+ try self.writeSymtab(macho_file);
+ try self.writeStrtab();
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
const aligned_size = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
seg.vmsize = aligned_size;
}
-fn writeSymtab(self: *DebugSymbols, macho_file: *MachO, lc: *macho.symtab_command) !void {
+fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -530,10 +506,10 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO, lc: *macho.symtab_comman
const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff;
- lc.symoff = @intCast(u32, offset);
- lc.nsyms = @intCast(u32, nsyms);
+ self.symtab_cmd.symoff = @intCast(u32, offset);
+ self.symtab_cmd.nsyms = @intCast(u32, nsyms);
- const locals_off = lc.symoff;
+ const locals_off = @intCast(u32, offset);
const locals_size = nlocals * @sizeOf(macho.nlist_64);
const exports_off = locals_off + locals_size;
const exports_size = nexports * @sizeOf(macho.nlist_64);
@@ -545,26 +521,26 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO, lc: *macho.symtab_comman
try self.file.pwriteAll(mem.sliceAsBytes(exports.items), exports_off);
}
-fn writeStrtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {
+fn writeStrtab(self: *DebugSymbols) !void {
const tracy = trace(@src());
defer tracy.end();
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
- const symtab_size = @intCast(u32, lc.nsyms * @sizeOf(macho.nlist_64));
- const offset = mem.alignForwardGeneric(u64, lc.symoff + symtab_size, @alignOf(u64));
+ const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64));
+ const offset = mem.alignForwardGeneric(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64));
const needed_size = mem.alignForwardGeneric(u64, self.strtab.buffer.items.len, @alignOf(u64));
seg.filesize = offset + needed_size - seg.fileoff;
- lc.stroff = @intCast(u32, offset);
- lc.strsize = @intCast(u32, needed_size);
+ self.symtab_cmd.stroff = @intCast(u32, offset);
+ self.symtab_cmd.strsize = @intCast(u32, needed_size);
- log.debug("writing string table from 0x{x} to 0x{x}", .{ lc.stroff, lc.stroff + lc.strsize });
+ log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
- try self.file.pwriteAll(self.strtab.buffer.items, lc.stroff);
+ try self.file.pwriteAll(self.strtab.buffer.items, offset);
if (self.strtab.buffer.items.len < needed_size) {
// Ensure we are always padded to the actual length of the file.
- try self.file.pwriteAll(&[_]u8{0}, lc.stroff + lc.strsize);
+ try self.file.pwriteAll(&[_]u8{0}, offset + needed_size);
}
}
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
new file mode 100644
index 0000000000..0e3760526c
--- /dev/null
+++ b/src/link/MachO/load_commands.zig
@@ -0,0 +1,314 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const link = @import("../../link.zig");
+const log = std.log.scoped(.link);
+const macho = std.macho;
+const mem = std.mem;
+
+const Allocator = mem.Allocator;
+const Dylib = @import("Dylib.zig");
+
+pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld";
+
+fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 {
+ const darwin_path_max = 1024;
+ const name_len = if (assume_max_path_len) darwin_path_max else std.mem.len(name) + 1;
+ return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64));
+}
+
+const CalcLCsSizeCtx = struct {
+ segments: []const macho.segment_command_64,
+ dylibs: []const Dylib,
+ referenced_dylibs: []u16,
+ wants_function_starts: bool = true,
+};
+
+fn calcLCsSize(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx, assume_max_path_len: bool) !u32 {
+ var has_text_segment: bool = false;
+ var sizeofcmds: u64 = 0;
+ for (ctx.segments) |seg| {
+ sizeofcmds += seg.nsects * @sizeOf(macho.section_64) + @sizeOf(macho.segment_command_64);
+ if (mem.eql(u8, seg.segName(), "__TEXT")) {
+ has_text_segment = true;
+ }
+ }
+
+ // LC_DYLD_INFO_ONLY
+ sizeofcmds += @sizeOf(macho.dyld_info_command);
+ // LC_FUNCTION_STARTS
+ if (has_text_segment and ctx.wants_function_starts) {
+ sizeofcmds += @sizeOf(macho.linkedit_data_command);
+ }
+ // LC_DATA_IN_CODE
+ sizeofcmds += @sizeOf(macho.linkedit_data_command);
+ // LC_SYMTAB
+ sizeofcmds += @sizeOf(macho.symtab_command);
+ // LC_DYSYMTAB
+ sizeofcmds += @sizeOf(macho.dysymtab_command);
+ // LC_LOAD_DYLINKER
+ sizeofcmds += calcInstallNameLen(
+ @sizeOf(macho.dylinker_command),
+ mem.sliceTo(default_dyld_path, 0),
+ false,
+ );
+ // LC_MAIN
+ if (options.output_mode == .Exe) {
+ sizeofcmds += @sizeOf(macho.entry_point_command);
+ }
+ // LC_ID_DYLIB
+ if (options.output_mode == .Lib and options.link_mode == .Dynamic) {
+ sizeofcmds += blk: {
+ const emit = options.emit.?;
+ const install_name = options.install_name orelse try emit.directory.join(gpa, &.{emit.sub_path});
+ defer if (options.install_name == null) gpa.free(install_name);
+ break :blk calcInstallNameLen(
+ @sizeOf(macho.dylib_command),
+ install_name,
+ assume_max_path_len,
+ );
+ };
+ }
+ // LC_RPATH
+ {
+ var it = RpathIterator.init(gpa, options.rpath_list);
+ defer it.deinit();
+ while (try it.next()) |rpath| {
+ sizeofcmds += calcInstallNameLen(
+ @sizeOf(macho.rpath_command),
+ rpath,
+ assume_max_path_len,
+ );
+ }
+ }
+ // LC_SOURCE_VERSION
+ sizeofcmds += @sizeOf(macho.source_version_command);
+ // LC_BUILD_VERSION
+ sizeofcmds += @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
+ // LC_UUID
+ sizeofcmds += @sizeOf(macho.uuid_command);
+ // LC_LOAD_DYLIB
+ for (ctx.referenced_dylibs) |id| {
+ const dylib = ctx.dylibs[id];
+ const dylib_id = dylib.id orelse unreachable;
+ sizeofcmds += calcInstallNameLen(
+ @sizeOf(macho.dylib_command),
+ dylib_id.name,
+ assume_max_path_len,
+ );
+ }
+ // LC_CODE_SIGNATURE
+ {
+ const target = options.target;
+ const requires_codesig = blk: {
+ if (options.entitlements) |_| break :blk true;
+ if (target.cpu.arch == .aarch64 and (target.os.tag == .macos or target.abi == .simulator))
+ break :blk true;
+ break :blk false;
+ };
+ if (requires_codesig) {
+ sizeofcmds += @sizeOf(macho.linkedit_data_command);
+ }
+ }
+
+ return @intCast(u32, sizeofcmds);
+}
+
+pub fn calcMinHeaderPad(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx) !u64 {
+ var padding: u32 = (try calcLCsSize(gpa, options, ctx, false)) + (options.headerpad_size orelse 0);
+ log.debug("minimum requested headerpad size 0x{x}", .{padding + @sizeOf(macho.mach_header_64)});
+
+ if (options.headerpad_max_install_names) {
+ var min_headerpad_size: u32 = try calcLCsSize(gpa, options, ctx, true);
+ log.debug("headerpad_max_install_names minimum headerpad size 0x{x}", .{
+ min_headerpad_size + @sizeOf(macho.mach_header_64),
+ });
+ padding = @max(padding, min_headerpad_size);
+ }
+
+ const offset = @sizeOf(macho.mach_header_64) + padding;
+ log.debug("actual headerpad size 0x{x}", .{offset});
+
+ return offset;
+}
+
+pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
+ var ncmds: u32 = 0;
+ var pos: usize = 0;
+ while (true) {
+ if (pos >= lc_buffer.len) break;
+ const cmd = @ptrCast(*align(1) const macho.load_command, lc_buffer.ptr + pos).*;
+ ncmds += 1;
+ pos += cmd.cmdsize;
+ }
+ return ncmds;
+}
+
+pub fn writeDylinkerLC(lc_writer: anytype) !void {
+ const name_len = mem.sliceTo(default_dyld_path, 0).len;
+ const cmdsize = @intCast(u32, mem.alignForwardGeneric(
+ u64,
+ @sizeOf(macho.dylinker_command) + name_len,
+ @sizeOf(u64),
+ ));
+ try lc_writer.writeStruct(macho.dylinker_command{
+ .cmd = .LOAD_DYLINKER,
+ .cmdsize = cmdsize,
+ .name = @sizeOf(macho.dylinker_command),
+ });
+ try lc_writer.writeAll(mem.sliceTo(default_dyld_path, 0));
+ const padding = cmdsize - @sizeOf(macho.dylinker_command) - name_len;
+ if (padding > 0) {
+ try lc_writer.writeByteNTimes(0, padding);
+ }
+}
+
+const WriteDylibLCCtx = struct {
+ cmd: macho.LC,
+ name: []const u8,
+ timestamp: u32 = 2,
+ current_version: u32 = 0x10000,
+ compatibility_version: u32 = 0x10000,
+};
+
+fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void {
+ const name_len = ctx.name.len + 1;
+ const cmdsize = @intCast(u32, mem.alignForwardGeneric(
+ u64,
+ @sizeOf(macho.dylib_command) + name_len,
+ @sizeOf(u64),
+ ));
+ try lc_writer.writeStruct(macho.dylib_command{
+ .cmd = ctx.cmd,
+ .cmdsize = cmdsize,
+ .dylib = .{
+ .name = @sizeOf(macho.dylib_command),
+ .timestamp = ctx.timestamp,
+ .current_version = ctx.current_version,
+ .compatibility_version = ctx.compatibility_version,
+ },
+ });
+ try lc_writer.writeAll(ctx.name);
+ try lc_writer.writeByte(0);
+ const padding = cmdsize - @sizeOf(macho.dylib_command) - name_len;
+ if (padding > 0) {
+ try lc_writer.writeByteNTimes(0, padding);
+ }
+}
+
+pub fn writeDylibIdLC(gpa: Allocator, options: *const link.Options, lc_writer: anytype) !void {
+ assert(options.output_mode == .Lib and options.link_mode == .Dynamic);
+ const emit = options.emit.?;
+ const install_name = options.install_name orelse try emit.directory.join(gpa, &.{emit.sub_path});
+ defer if (options.install_name == null) gpa.free(install_name);
+ const curr = options.version orelse std.builtin.Version{
+ .major = 1,
+ .minor = 0,
+ .patch = 0,
+ };
+ const compat = options.compatibility_version orelse std.builtin.Version{
+ .major = 1,
+ .minor = 0,
+ .patch = 0,
+ };
+ try writeDylibLC(.{
+ .cmd = .ID_DYLIB,
+ .name = install_name,
+ .current_version = curr.major << 16 | curr.minor << 8 | curr.patch,
+ .compatibility_version = compat.major << 16 | compat.minor << 8 | compat.patch,
+ }, lc_writer);
+}
+
+const RpathIterator = struct {
+ buffer: []const []const u8,
+ table: std.StringHashMap(void),
+ count: usize = 0,
+
+ fn init(gpa: Allocator, rpaths: []const []const u8) RpathIterator {
+ return .{ .buffer = rpaths, .table = std.StringHashMap(void).init(gpa) };
+ }
+
+ fn deinit(it: *RpathIterator) void {
+ it.table.deinit();
+ }
+
+ fn next(it: *RpathIterator) !?[]const u8 {
+ while (true) {
+ if (it.count >= it.buffer.len) return null;
+ const rpath = it.buffer[it.count];
+ it.count += 1;
+ const gop = try it.table.getOrPut(rpath);
+ if (gop.found_existing) continue;
+ return rpath;
+ }
+ }
+};
+
+pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: anytype) !void {
+ var it = RpathIterator.init(gpa, options.rpath_list);
+ defer it.deinit();
+
+ while (try it.next()) |rpath| {
+ const rpath_len = rpath.len + 1;
+ const cmdsize = @intCast(u32, mem.alignForwardGeneric(
+ u64,
+ @sizeOf(macho.rpath_command) + rpath_len,
+ @sizeOf(u64),
+ ));
+ try lc_writer.writeStruct(macho.rpath_command{
+ .cmdsize = cmdsize,
+ .path = @sizeOf(macho.rpath_command),
+ });
+ try lc_writer.writeAll(rpath);
+ try lc_writer.writeByte(0);
+ const padding = cmdsize - @sizeOf(macho.rpath_command) - rpath_len;
+ if (padding > 0) {
+ try lc_writer.writeByteNTimes(0, padding);
+ }
+ }
+}
+
+pub fn writeBuildVersionLC(options: *const link.Options, lc_writer: anytype) !void {
+ const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
+ const platform_version = blk: {
+ const ver = options.target.os.version_range.semver.min;
+ const platform_version = ver.major << 16 | ver.minor << 8;
+ break :blk platform_version;
+ };
+ const sdk_version = if (options.native_darwin_sdk) |sdk| blk: {
+ const ver = sdk.version;
+ const sdk_version = ver.major << 16 | ver.minor << 8;
+ break :blk sdk_version;
+ } else platform_version;
+ const is_simulator_abi = options.target.abi == .simulator;
+ try lc_writer.writeStruct(macho.build_version_command{
+ .cmdsize = cmdsize,
+ .platform = switch (options.target.os.tag) {
+ .macos => .MACOS,
+ .ios => if (is_simulator_abi) macho.PLATFORM.IOSSIMULATOR else macho.PLATFORM.IOS,
+ .watchos => if (is_simulator_abi) macho.PLATFORM.WATCHOSSIMULATOR else macho.PLATFORM.WATCHOS,
+ .tvos => if (is_simulator_abi) macho.PLATFORM.TVOSSIMULATOR else macho.PLATFORM.TVOS,
+ else => unreachable,
+ },
+ .minos = platform_version,
+ .sdk = sdk_version,
+ .ntools = 1,
+ });
+ try lc_writer.writeAll(mem.asBytes(&macho.build_tool_version{
+ .tool = .LD,
+ .version = 0x0,
+ }));
+}
+
+pub fn writeLoadDylibLCs(dylibs: []const Dylib, referenced: []u16, lc_writer: anytype) !void {
+ for (referenced) |index| {
+ const dylib = dylibs[index];
+ const dylib_id = dylib.id orelse unreachable;
+ try writeDylibLC(.{
+ .cmd = if (dylib.weak) .LOAD_WEAK_DYLIB else .LOAD_DYLIB,
+ .name = dylib_id.name,
+ .timestamp = dylib_id.timestamp,
+ .current_version = dylib_id.current_version,
+ .compatibility_version = dylib_id.compatibility_version,
+ }, lc_writer);
+ }
+}
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 9baecd326a..3305267b62 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -13,6 +13,7 @@ const bind = @import("bind.zig");
const dead_strip = @import("dead_strip.zig");
const fat = @import("fat.zig");
const link = @import("../../link.zig");
+const load_commands = @import("load_commands.zig");
const thunks = @import("thunks.zig");
const trace = @import("../../tracy.zig").trace;
@@ -25,6 +26,7 @@ const Compilation = @import("../../Compilation.zig");
const DwarfInfo = @import("DwarfInfo.zig");
const Dylib = @import("Dylib.zig");
const MachO = @import("../MachO.zig");
+const Md5 = std.crypto.hash.Md5;
const LibStub = @import("../tapi.zig").LibStub;
const Object = @import("Object.zig");
const StringTable = @import("../strtab.zig").StringTable;
@@ -34,7 +36,17 @@ pub const Zld = struct {
gpa: Allocator,
file: fs.File,
page_size: u16,
- options: link.Options,
+ options: *const link.Options,
+
+ dyld_info_cmd: macho.dyld_info_command = .{},
+ symtab_cmd: macho.symtab_command = .{},
+ dysymtab_cmd: macho.dysymtab_command = .{},
+ function_starts_cmd: macho.linkedit_data_command = .{ .cmd = .FUNCTION_STARTS },
+ data_in_code_cmd: macho.linkedit_data_command = .{ .cmd = .DATA_IN_CODE },
+ uuid_cmd: macho.uuid_command = .{
+ .uuid = [_]u8{0} ** 16,
+ },
+ codesig_cmd: macho.linkedit_data_command = .{ .cmd = .CODE_SIGNATURE },
objects: std.ArrayListUnmanaged(Object) = .{},
archives: std.ArrayListUnmanaged(Archive) = .{},
@@ -1227,195 +1239,6 @@ pub const Zld = struct {
}
}
- fn writeDylinkerLC(ncmds: *u32, lc_writer: anytype) !void {
- const name_len = mem.sliceTo(MachO.default_dyld_path, 0).len;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
- u64,
- @sizeOf(macho.dylinker_command) + name_len,
- @sizeOf(u64),
- ));
- try lc_writer.writeStruct(macho.dylinker_command{
- .cmd = .LOAD_DYLINKER,
- .cmdsize = cmdsize,
- .name = @sizeOf(macho.dylinker_command),
- });
- try lc_writer.writeAll(mem.sliceTo(MachO.default_dyld_path, 0));
- const padding = cmdsize - @sizeOf(macho.dylinker_command) - name_len;
- if (padding > 0) {
- try lc_writer.writeByteNTimes(0, padding);
- }
- ncmds.* += 1;
- }
-
- fn writeMainLC(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
- if (self.options.output_mode != .Exe) return;
- const seg_id = self.getSegmentByName("__TEXT").?;
- const seg = self.segments.items[seg_id];
- const global = self.getEntryPoint();
- const sym = self.getSymbol(global);
- try lc_writer.writeStruct(macho.entry_point_command{
- .cmd = .MAIN,
- .cmdsize = @sizeOf(macho.entry_point_command),
- .entryoff = @intCast(u32, sym.n_value - seg.vmaddr),
- .stacksize = self.options.stack_size_override orelse 0,
- });
- ncmds.* += 1;
- }
-
- const WriteDylibLCCtx = struct {
- cmd: macho.LC,
- name: []const u8,
- timestamp: u32 = 2,
- current_version: u32 = 0x10000,
- compatibility_version: u32 = 0x10000,
- };
-
- fn writeDylibLC(ctx: WriteDylibLCCtx, ncmds: *u32, lc_writer: anytype) !void {
- const name_len = ctx.name.len + 1;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
- u64,
- @sizeOf(macho.dylib_command) + name_len,
- @sizeOf(u64),
- ));
- try lc_writer.writeStruct(macho.dylib_command{
- .cmd = ctx.cmd,
- .cmdsize = cmdsize,
- .dylib = .{
- .name = @sizeOf(macho.dylib_command),
- .timestamp = ctx.timestamp,
- .current_version = ctx.current_version,
- .compatibility_version = ctx.compatibility_version,
- },
- });
- try lc_writer.writeAll(ctx.name);
- try lc_writer.writeByte(0);
- const padding = cmdsize - @sizeOf(macho.dylib_command) - name_len;
- if (padding > 0) {
- try lc_writer.writeByteNTimes(0, padding);
- }
- ncmds.* += 1;
- }
-
- fn writeDylibIdLC(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
- if (self.options.output_mode != .Lib) return;
- const install_name = self.options.install_name orelse self.options.emit.?.sub_path;
- const curr = self.options.version orelse std.builtin.Version{
- .major = 1,
- .minor = 0,
- .patch = 0,
- };
- const compat = self.options.compatibility_version orelse std.builtin.Version{
- .major = 1,
- .minor = 0,
- .patch = 0,
- };
- try writeDylibLC(.{
- .cmd = .ID_DYLIB,
- .name = install_name,
- .current_version = curr.major << 16 | curr.minor << 8 | curr.patch,
- .compatibility_version = compat.major << 16 | compat.minor << 8 | compat.patch,
- }, ncmds, lc_writer);
- }
-
- const RpathIterator = struct {
- buffer: []const []const u8,
- table: std.StringHashMap(void),
- count: usize = 0,
-
- fn init(gpa: Allocator, rpaths: []const []const u8) RpathIterator {
- return .{ .buffer = rpaths, .table = std.StringHashMap(void).init(gpa) };
- }
-
- fn deinit(it: *RpathIterator) void {
- it.table.deinit();
- }
-
- fn next(it: *RpathIterator) !?[]const u8 {
- while (true) {
- if (it.count >= it.buffer.len) return null;
- const rpath = it.buffer[it.count];
- it.count += 1;
- const gop = try it.table.getOrPut(rpath);
- if (gop.found_existing) continue;
- return rpath;
- }
- }
- };
-
- fn writeRpathLCs(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
- const gpa = self.gpa;
-
- var it = RpathIterator.init(gpa, self.options.rpath_list);
- defer it.deinit();
-
- while (try it.next()) |rpath| {
- const rpath_len = rpath.len + 1;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
- u64,
- @sizeOf(macho.rpath_command) + rpath_len,
- @sizeOf(u64),
- ));
- try lc_writer.writeStruct(macho.rpath_command{
- .cmdsize = cmdsize,
- .path = @sizeOf(macho.rpath_command),
- });
- try lc_writer.writeAll(rpath);
- try lc_writer.writeByte(0);
- const padding = cmdsize - @sizeOf(macho.rpath_command) - rpath_len;
- if (padding > 0) {
- try lc_writer.writeByteNTimes(0, padding);
- }
- ncmds.* += 1;
- }
- }
-
- fn writeBuildVersionLC(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
- const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
- const platform_version = blk: {
- const ver = self.options.target.os.version_range.semver.min;
- const platform_version = ver.major << 16 | ver.minor << 8;
- break :blk platform_version;
- };
- const sdk_version = if (self.options.native_darwin_sdk) |sdk| blk: {
- const ver = sdk.version;
- const sdk_version = ver.major << 16 | ver.minor << 8;
- break :blk sdk_version;
- } else platform_version;
- const is_simulator_abi = self.options.target.abi == .simulator;
- try lc_writer.writeStruct(macho.build_version_command{
- .cmdsize = cmdsize,
- .platform = switch (self.options.target.os.tag) {
- .macos => .MACOS,
- .ios => if (is_simulator_abi) macho.PLATFORM.IOSSIMULATOR else macho.PLATFORM.IOS,
- .watchos => if (is_simulator_abi) macho.PLATFORM.WATCHOSSIMULATOR else macho.PLATFORM.WATCHOS,
- .tvos => if (is_simulator_abi) macho.PLATFORM.TVOSSIMULATOR else macho.PLATFORM.TVOS,
- else => unreachable,
- },
- .minos = platform_version,
- .sdk = sdk_version,
- .ntools = 1,
- });
- try lc_writer.writeAll(mem.asBytes(&macho.build_tool_version{
- .tool = .LD,
- .version = 0x0,
- }));
- ncmds.* += 1;
- }
-
- fn writeLoadDylibLCs(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
- for (self.referenced_dylibs.keys()) |id| {
- const dylib = self.dylibs.items[id];
- const dylib_id = dylib.id orelse unreachable;
- try writeDylibLC(.{
- .cmd = if (dylib.weak) .LOAD_WEAK_DYLIB else .LOAD_DYLIB,
- .name = dylib_id.name,
- .timestamp = dylib_id.timestamp,
- .current_version = dylib_id.current_version,
- .compatibility_version = dylib_id.compatibility_version,
- }, ncmds, lc_writer);
- }
- }
-
pub fn deinit(self: *Zld) void {
const gpa = self.gpa;
@@ -1516,110 +1339,6 @@ pub const Zld = struct {
}
}
- fn calcLCsSize(self: *Zld, assume_max_path_len: bool) !u32 {
- const gpa = self.gpa;
-
- var sizeofcmds: u64 = 0;
- for (self.segments.items) |seg| {
- sizeofcmds += seg.nsects * @sizeOf(macho.section_64) + @sizeOf(macho.segment_command_64);
- }
-
- // LC_DYLD_INFO_ONLY
- sizeofcmds += @sizeOf(macho.dyld_info_command);
- // LC_FUNCTION_STARTS
- if (self.getSectionByName("__TEXT", "__text")) |_| {
- sizeofcmds += @sizeOf(macho.linkedit_data_command);
- }
- // LC_DATA_IN_CODE
- sizeofcmds += @sizeOf(macho.linkedit_data_command);
- // LC_SYMTAB
- sizeofcmds += @sizeOf(macho.symtab_command);
- // LC_DYSYMTAB
- sizeofcmds += @sizeOf(macho.dysymtab_command);
- // LC_LOAD_DYLINKER
- sizeofcmds += MachO.calcInstallNameLen(
- @sizeOf(macho.dylinker_command),
- mem.sliceTo(MachO.default_dyld_path, 0),
- false,
- );
- // LC_MAIN
- if (self.options.output_mode == .Exe) {
- sizeofcmds += @sizeOf(macho.entry_point_command);
- }
- // LC_ID_DYLIB
- if (self.options.output_mode == .Lib) {
- sizeofcmds += blk: {
- const install_name = self.options.install_name orelse self.options.emit.?.sub_path;
- break :blk MachO.calcInstallNameLen(
- @sizeOf(macho.dylib_command),
- install_name,
- assume_max_path_len,
- );
- };
- }
- // LC_RPATH
- {
- var it = RpathIterator.init(gpa, self.options.rpath_list);
- defer it.deinit();
- while (try it.next()) |rpath| {
- sizeofcmds += MachO.calcInstallNameLen(
- @sizeOf(macho.rpath_command),
- rpath,
- assume_max_path_len,
- );
- }
- }
- // LC_SOURCE_VERSION
- sizeofcmds += @sizeOf(macho.source_version_command);
- // LC_BUILD_VERSION
- sizeofcmds += @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
- // LC_UUID
- sizeofcmds += @sizeOf(macho.uuid_command);
- // LC_LOAD_DYLIB
- for (self.referenced_dylibs.keys()) |id| {
- const dylib = self.dylibs.items[id];
- const dylib_id = dylib.id orelse unreachable;
- sizeofcmds += MachO.calcInstallNameLen(
- @sizeOf(macho.dylib_command),
- dylib_id.name,
- assume_max_path_len,
- );
- }
- // LC_CODE_SIGNATURE
- {
- const target = self.options.target;
- const requires_codesig = blk: {
- if (self.options.entitlements) |_| break :blk true;
- if (target.cpu.arch == .aarch64 and (target.os.tag == .macos or target.abi == .simulator))
- break :blk true;
- break :blk false;
- };
- if (requires_codesig) {
- sizeofcmds += @sizeOf(macho.linkedit_data_command);
- }
- }
-
- return @intCast(u32, sizeofcmds);
- }
-
- fn calcMinHeaderPad(self: *Zld) !u64 {
- var padding: u32 = (try self.calcLCsSize(false)) + (self.options.headerpad_size orelse 0);
- log.debug("minimum requested headerpad size 0x{x}", .{padding + @sizeOf(macho.mach_header_64)});
-
- if (self.options.headerpad_max_install_names) {
- var min_headerpad_size: u32 = try self.calcLCsSize(true);
- log.debug("headerpad_max_install_names minimum headerpad size 0x{x}", .{
- min_headerpad_size + @sizeOf(macho.mach_header_64),
- });
- padding = @max(padding, min_headerpad_size);
- }
-
- const offset = @sizeOf(macho.mach_header_64) + padding;
- log.debug("actual headerpad size 0x{x}", .{offset});
-
- return offset;
- }
-
pub fn allocateSymbol(self: *Zld) !u32 {
try self.locals.ensureUnusedCapacity(self.gpa, 1);
log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
@@ -1842,7 +1561,11 @@ pub const Zld = struct {
fn allocateSegments(self: *Zld) !void {
for (self.segments.items) |*segment, segment_index| {
const is_text_segment = mem.eql(u8, segment.segName(), "__TEXT");
- const base_size = if (is_text_segment) try self.calcMinHeaderPad() else 0;
+ const base_size = if (is_text_segment) try load_commands.calcMinHeaderPad(self.gpa, self.options, .{
+ .segments = self.segments.items,
+ .dylibs = self.dylibs.items,
+ .referenced_dylibs = self.referenced_dylibs.keys(),
+ }) else 0;
try self.allocateSegment(@intCast(u8, segment_index), base_size);
}
}
@@ -2015,7 +1738,7 @@ pub const Zld = struct {
return (@intCast(u8, segment_precedence) << 4) + section_precedence;
}
- fn writeSegmentHeaders(self: *Zld, ncmds: *u32, writer: anytype) !void {
+ fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
for (self.segments.items) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
var out_seg = seg;
@@ -2039,16 +1762,14 @@ pub const Zld = struct {
if (header.size == 0) continue;
try writer.writeStruct(header);
}
-
- ncmds.* += 1;
}
}
- fn writeLinkeditSegmentData(self: *Zld, ncmds: *u32, lc_writer: anytype, reverse_lookups: [][]u32) !void {
- try self.writeDyldInfoData(ncmds, lc_writer, reverse_lookups);
- try self.writeFunctionStarts(ncmds, lc_writer);
- try self.writeDataInCode(ncmds, lc_writer);
- try self.writeSymtabs(ncmds, lc_writer);
+ fn writeLinkeditSegmentData(self: *Zld, reverse_lookups: [][]u32) !void {
+ try self.writeDyldInfoData(reverse_lookups);
+ try self.writeFunctionStarts();
+ try self.writeDataInCode();
+ try self.writeSymtabs();
const seg = self.getLinkeditSegmentPtr();
seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
@@ -2437,7 +2158,7 @@ pub const Zld = struct {
try trie.finalize(gpa);
}
- fn writeDyldInfoData(self: *Zld, ncmds: *u32, lc_writer: anytype, reverse_lookups: [][]u32) !void {
+ fn writeDyldInfoData(self: *Zld, reverse_lookups: [][]u32) !void {
const gpa = self.gpa;
var rebase_pointers = std.ArrayList(bind.Pointer).init(gpa);
@@ -2457,25 +2178,34 @@ pub const Zld = struct {
try self.collectExportData(&trie);
const link_seg = self.getLinkeditSegmentPtr();
- const rebase_off = mem.alignForwardGeneric(u64, link_seg.fileoff, @alignOf(u64));
- assert(rebase_off == link_seg.fileoff);
+ assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
+ const rebase_off = link_seg.fileoff;
const rebase_size = try bind.rebaseInfoSize(rebase_pointers.items);
- log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size });
+ const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64));
+ log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
- const bind_off = mem.alignForwardGeneric(u64, rebase_off + rebase_size, @alignOf(u64));
+ const bind_off = rebase_off + rebase_size_aligned;
const bind_size = try bind.bindInfoSize(bind_pointers.items);
- log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size });
+ const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64));
+ log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
- const lazy_bind_off = mem.alignForwardGeneric(u64, bind_off + bind_size, @alignOf(u64));
+ const lazy_bind_off = bind_off + bind_size_aligned;
const lazy_bind_size = try bind.lazyBindInfoSize(lazy_bind_pointers.items);
- log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size });
+ const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64));
+ log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
+ lazy_bind_off,
+ lazy_bind_off + lazy_bind_size_aligned,
+ });
- const export_off = mem.alignForwardGeneric(u64, lazy_bind_off + lazy_bind_size, @alignOf(u64));
+ const export_off = lazy_bind_off + lazy_bind_size_aligned;
const export_size = trie.size;
- log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size });
+ const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64));
+ log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
- const needed_size = math.cast(usize, export_off + export_size - rebase_off) orelse return error.Overflow;
+ const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
+ return error.Overflow;
link_seg.filesize = needed_size;
+ assert(mem.isAlignedGeneric(u64, link_seg.fileoff + link_seg.filesize, @alignOf(u64)));
var buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
@@ -2506,21 +2236,14 @@ pub const Zld = struct {
const size = math.cast(usize, lazy_bind_size) orelse return error.Overflow;
try self.populateLazyBindOffsetsInStubHelper(buffer[offset..][0..size]);
- try lc_writer.writeStruct(macho.dyld_info_command{
- .cmd = .DYLD_INFO_ONLY,
- .cmdsize = @sizeOf(macho.dyld_info_command),
- .rebase_off = @intCast(u32, rebase_off),
- .rebase_size = @intCast(u32, rebase_size),
- .bind_off = @intCast(u32, bind_off),
- .bind_size = @intCast(u32, bind_size),
- .weak_bind_off = 0,
- .weak_bind_size = 0,
- .lazy_bind_off = @intCast(u32, lazy_bind_off),
- .lazy_bind_size = @intCast(u32, lazy_bind_size),
- .export_off = @intCast(u32, export_off),
- .export_size = @intCast(u32, export_size),
- });
- ncmds.* += 1;
+ self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
+ self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
+ self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
+ self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
+ self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
+ self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
+ self.dyld_info_cmd.export_off = @intCast(u32, export_off);
+ self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
}
fn populateLazyBindOffsetsInStubHelper(self: *Zld, buffer: []const u8) !void {
@@ -2638,7 +2361,7 @@ pub const Zld = struct {
const asc_u64 = std.sort.asc(u64);
- fn writeFunctionStarts(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
+ fn writeFunctionStarts(self: *Zld) !void {
const text_seg_index = self.getSegmentByName("__TEXT") orelse return;
const text_sect_index = self.getSectionByName("__TEXT", "__text") orelse return;
const text_seg = self.segments.items[text_seg_index];
@@ -2689,21 +2412,23 @@ pub const Zld = struct {
}
const link_seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(u64, link_seg.fileoff + link_seg.filesize, @alignOf(u64));
+ const offset = link_seg.fileoff + link_seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = buffer.items.len;
- link_seg.filesize = offset + needed_size - link_seg.fileoff;
+ const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
+ if (padding > 0) {
+ try buffer.ensureUnusedCapacity(padding);
+ buffer.appendNTimesAssumeCapacity(0, padding);
+ }
+ link_seg.filesize = offset + needed_size_aligned - link_seg.fileoff;
- log.debug("writing function starts info from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+ log.debug("writing function starts info from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
try self.file.pwriteAll(buffer.items, offset);
- try lc_writer.writeStruct(macho.linkedit_data_command{
- .cmd = .FUNCTION_STARTS,
- .cmdsize = @sizeOf(macho.linkedit_data_command),
- .dataoff = @intCast(u32, offset),
- .datasize = @intCast(u32, needed_size),
- });
- ncmds.* += 1;
+ self.function_starts_cmd.dataoff = @intCast(u32, offset);
+ self.function_starts_cmd.datasize = @intCast(u32, needed_size_aligned);
}
fn filterDataInCode(
@@ -2725,7 +2450,7 @@ pub const Zld = struct {
return dices[start..end];
}
- fn writeDataInCode(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
+ fn writeDataInCode(self: *Zld) !void {
var out_dice = std.ArrayList(macho.data_in_code_entry).init(self.gpa);
defer out_dice.deinit();
@@ -2768,61 +2493,33 @@ pub const Zld = struct {
}
const seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+ const offset = seg.fileoff + seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry);
- seg.filesize = offset + needed_size - seg.fileoff;
+ const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ seg.filesize = offset + needed_size_aligned - seg.fileoff;
- log.debug("writing data-in-code from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+ const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
+ defer self.gpa.free(buffer);
+ mem.set(u8, buffer, 0);
+ mem.copy(u8, buffer, mem.sliceAsBytes(out_dice.items));
- try self.file.pwriteAll(mem.sliceAsBytes(out_dice.items), offset);
- try lc_writer.writeStruct(macho.linkedit_data_command{
- .cmd = .DATA_IN_CODE,
- .cmdsize = @sizeOf(macho.linkedit_data_command),
- .dataoff = @intCast(u32, offset),
- .datasize = @intCast(u32, needed_size),
- });
- ncmds.* += 1;
+ log.debug("writing data-in-code from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
+
+ try self.file.pwriteAll(buffer, offset);
+
+ self.data_in_code_cmd.dataoff = @intCast(u32, offset);
+ self.data_in_code_cmd.datasize = @intCast(u32, needed_size_aligned);
}
- fn writeSymtabs(self: *Zld, ncmds: *u32, lc_writer: anytype) !void {
- var symtab_cmd = macho.symtab_command{
- .cmdsize = @sizeOf(macho.symtab_command),
- .symoff = 0,
- .nsyms = 0,
- .stroff = 0,
- .strsize = 0,
- };
- var dysymtab_cmd = macho.dysymtab_command{
- .cmdsize = @sizeOf(macho.dysymtab_command),
- .ilocalsym = 0,
- .nlocalsym = 0,
- .iextdefsym = 0,
- .nextdefsym = 0,
- .iundefsym = 0,
- .nundefsym = 0,
- .tocoff = 0,
- .ntoc = 0,
- .modtaboff = 0,
- .nmodtab = 0,
- .extrefsymoff = 0,
- .nextrefsyms = 0,
- .indirectsymoff = 0,
- .nindirectsyms = 0,
- .extreloff = 0,
- .nextrel = 0,
- .locreloff = 0,
- .nlocrel = 0,
- };
- var ctx = try self.writeSymtab(&symtab_cmd);
+ fn writeSymtabs(self: *Zld) !void {
+ var ctx = try self.writeSymtab();
defer ctx.imports_table.deinit();
- try self.writeDysymtab(ctx, &dysymtab_cmd);
- try self.writeStrtab(&symtab_cmd);
- try lc_writer.writeStruct(symtab_cmd);
- try lc_writer.writeStruct(dysymtab_cmd);
- ncmds.* += 2;
+ try self.writeDysymtab(ctx);
+ try self.writeStrtab();
}
- fn writeSymtab(self: *Zld, lc: *macho.symtab_command) !SymtabCtx {
+ fn writeSymtab(self: *Zld) !SymtabCtx {
const gpa = self.gpa;
var locals = std.ArrayList(macho.nlist_64).init(gpa);
@@ -2843,12 +2540,6 @@ pub const Zld = struct {
}
}
- if (!self.options.strip) {
- for (self.objects.items) |object| {
- try self.generateSymbolStabs(object, &locals);
- }
- }
-
var exports = std.ArrayList(macho.nlist_64).init(gpa);
defer exports.deinit();
@@ -2879,19 +2570,25 @@ pub const Zld = struct {
try imports_table.putNoClobber(global, new_index);
}
+ // We generate stabs last in order to ensure that the strtab always has debug info
+ // strings trailing
+ if (!self.options.strip) {
+ for (self.objects.items) |object| {
+ try self.generateSymbolStabs(object, &locals);
+ }
+ }
+
const nlocals = @intCast(u32, locals.items.len);
const nexports = @intCast(u32, exports.items.len);
const nimports = @intCast(u32, imports.items.len);
const nsyms = nlocals + nexports + nimports;
const seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(
- u64,
- seg.fileoff + seg.filesize,
- @alignOf(macho.nlist_64),
- );
+ const offset = seg.fileoff + seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff;
+ assert(mem.isAlignedGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64)));
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
@@ -2903,8 +2600,8 @@ pub const Zld = struct {
log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
try self.file.pwriteAll(buffer.items, offset);
- lc.symoff = @intCast(u32, offset);
- lc.nsyms = nsyms;
+ self.symtab_cmd.symoff = @intCast(u32, offset);
+ self.symtab_cmd.nsyms = nsyms;
return SymtabCtx{
.nlocalsym = nlocals,
@@ -2914,18 +2611,25 @@ pub const Zld = struct {
};
}
- fn writeStrtab(self: *Zld, lc: *macho.symtab_command) !void {
+ fn writeStrtab(self: *Zld) !void {
const seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+ const offset = seg.fileoff + seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = self.strtab.buffer.items.len;
- seg.filesize = offset + needed_size - seg.fileoff;
+ const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ seg.filesize = offset + needed_size_aligned - seg.fileoff;
+
+ log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
- log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+ const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
+ defer self.gpa.free(buffer);
+ mem.set(u8, buffer, 0);
+ mem.copy(u8, buffer, self.strtab.buffer.items);
- try self.file.pwriteAll(self.strtab.buffer.items, offset);
+ try self.file.pwriteAll(buffer, offset);
- lc.stroff = @intCast(u32, offset);
- lc.strsize = @intCast(u32, needed_size);
+ self.symtab_cmd.stroff = @intCast(u32, offset);
+ self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
}
const SymtabCtx = struct {
@@ -2935,7 +2639,7 @@ pub const Zld = struct {
imports_table: std.AutoHashMap(SymbolWithLoc, u32),
};
- fn writeDysymtab(self: *Zld, ctx: SymtabCtx, lc: *macho.dysymtab_command) !void {
+ fn writeDysymtab(self: *Zld, ctx: SymtabCtx) !void {
const gpa = self.gpa;
const nstubs = @intCast(u32, self.stubs.items.len);
const ngot_entries = @intCast(u32, self.got_entries.items.len);
@@ -2944,15 +2648,17 @@ pub const Zld = struct {
const iundefsym = iextdefsym + ctx.nextdefsym;
const seg = self.getLinkeditSegmentPtr();
- const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+ const offset = seg.fileoff + seg.filesize;
+ assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nindirectsyms * @sizeOf(u32);
- seg.filesize = offset + needed_size - seg.fileoff;
+ const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ seg.filesize = offset + needed_size_aligned - seg.fileoff;
- log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+ log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
var buf = std.ArrayList(u8).init(gpa);
defer buf.deinit();
- try buf.ensureTotalCapacity(needed_size);
+ try buf.ensureTotalCapacityPrecise(math.cast(usize, needed_size_aligned) orelse return error.Overflow);
const writer = buf.writer();
if (self.getSectionByName("__TEXT", "__stubs")) |sect_id| {
@@ -2988,24 +2694,170 @@ pub const Zld = struct {
}
}
- assert(buf.items.len == needed_size);
+ const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
+ if (padding > 0) {
+ buf.appendNTimesAssumeCapacity(0, padding);
+ }
+
+ assert(buf.items.len == needed_size_aligned);
try self.file.pwriteAll(buf.items, offset);
- lc.nlocalsym = ctx.nlocalsym;
- lc.iextdefsym = iextdefsym;
- lc.nextdefsym = ctx.nextdefsym;
- lc.iundefsym = iundefsym;
- lc.nundefsym = ctx.nundefsym;
- lc.indirectsymoff = @intCast(u32, offset);
- lc.nindirectsyms = nindirectsyms;
+ self.dysymtab_cmd.nlocalsym = ctx.nlocalsym;
+ self.dysymtab_cmd.iextdefsym = iextdefsym;
+ self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
+ self.dysymtab_cmd.iundefsym = iundefsym;
+ self.dysymtab_cmd.nundefsym = ctx.nundefsym;
+ self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset);
+ self.dysymtab_cmd.nindirectsyms = nindirectsyms;
}
- fn writeCodeSignaturePadding(
- self: *Zld,
- code_sig: *CodeSignature,
- ncmds: *u32,
- lc_writer: anytype,
- ) !u32 {
+ fn writeUuid(self: *Zld, comp: *const Compilation, args: struct {
+ linkedit_cmd_offset: u32,
+ symtab_cmd_offset: u32,
+ uuid_cmd_offset: u32,
+ codesig_cmd_offset: ?u32,
+ }) !void {
+ _ = comp;
+ switch (self.options.optimize_mode) {
+ .Debug => {
+ // In Debug we don't really care about reproducibility, so put in a random value
+ // and be done with it.
+ std.crypto.random.bytes(&self.uuid_cmd.uuid);
+ Md5.hash(&self.uuid_cmd.uuid, &self.uuid_cmd.uuid, .{});
+ conformUuid(&self.uuid_cmd.uuid);
+ },
+ else => {
+ // We set the max file size to the actual strtab buffer length to exclude any strtab padding.
+ const max_file_end = @intCast(u32, self.symtab_cmd.stroff + self.strtab.buffer.items.len);
+
+ const FileSubsection = struct {
+ start: u32,
+ end: u32,
+ };
+
+ var subsections: [5]FileSubsection = undefined;
+ var count: usize = 0;
+
+ // Exclude LINKEDIT segment command as it contains file size that includes stabs contribution
+ // and code signature.
+ subsections[count] = .{
+ .start = 0,
+ .end = args.linkedit_cmd_offset,
+ };
+ count += 1;
+
+ // Exclude SYMTAB and DYSYMTAB commands for the same reason.
+ subsections[count] = .{
+ .start = subsections[count - 1].end + @sizeOf(macho.segment_command_64),
+ .end = args.symtab_cmd_offset,
+ };
+ count += 1;
+
+ // Exclude CODE_SIGNATURE command (if present).
+ if (args.codesig_cmd_offset) |offset| {
+ subsections[count] = .{
+ .start = subsections[count - 1].end + @sizeOf(macho.symtab_command) + @sizeOf(macho.dysymtab_command),
+ .end = offset,
+ };
+ count += 1;
+ }
+
+ if (!self.options.strip) {
+ // Exclude region comprising all symbol stabs.
+ const nlocals = self.dysymtab_cmd.nlocalsym;
+
+ const locals_buf = try self.gpa.alloc(u8, nlocals * @sizeOf(macho.nlist_64));
+ defer self.gpa.free(locals_buf);
+
+ const amt = try self.file.preadAll(locals_buf, self.symtab_cmd.symoff);
+ if (amt != locals_buf.len) return error.InputOutput;
+ const locals = @ptrCast([*]macho.nlist_64, @alignCast(@alignOf(macho.nlist_64), locals_buf))[0..nlocals];
+
+ const istab: usize = for (locals) |local, i| {
+ if (local.stab()) break i;
+ } else locals.len;
+ const nstabs = locals.len - istab;
+
+ if (nstabs == 0) {
+ subsections[count] = .{
+ .start = subsections[count - 1].end + if (args.codesig_cmd_offset == null)
+ @as(u32, @sizeOf(macho.symtab_command) + @sizeOf(macho.dysymtab_command))
+ else
+ @sizeOf(macho.linkedit_data_command),
+ .end = max_file_end,
+ };
+ count += 1;
+ } else {
+ // Exclude a subsection of the strtab with names of the stabs.
+ // We do not care about anything succeeding strtab as it is the code signature data which is
+ // not part of the UUID calculation anyway.
+ const stab_stroff = locals[istab].n_strx;
+
+ subsections[count] = .{
+ .start = subsections[count - 1].end + if (args.codesig_cmd_offset == null)
+ @as(u32, @sizeOf(macho.symtab_command) + @sizeOf(macho.dysymtab_command))
+ else
+ @sizeOf(macho.linkedit_data_command),
+ .end = @intCast(u32, self.symtab_cmd.symoff + istab * @sizeOf(macho.nlist_64)),
+ };
+ count += 1;
+
+ subsections[count] = .{
+ .start = subsections[count - 1].end + @intCast(u32, nstabs * @sizeOf(macho.nlist_64)),
+ .end = self.symtab_cmd.stroff + stab_stroff,
+ };
+ count += 1;
+ }
+ } else {
+ subsections[count] = .{
+ .start = subsections[count - 1].end + if (args.codesig_cmd_offset == null)
+ @as(u32, @sizeOf(macho.symtab_command) + @sizeOf(macho.dysymtab_command))
+ else
+ @sizeOf(macho.linkedit_data_command),
+ .end = max_file_end,
+ };
+ count += 1;
+ }
+
+ const chunk_size = 0x4000;
+
+ var hasher = Md5.init(.{});
+ var buffer: [chunk_size]u8 = undefined;
+
+ for (subsections[0..count]) |cut| {
+ const size = cut.end - cut.start;
+ const num_chunks = mem.alignForward(size, chunk_size) / chunk_size;
+
+ var i: usize = 0;
+ while (i < num_chunks) : (i += 1) {
+ const fstart = cut.start + i * chunk_size;
+ const fsize = if (fstart + chunk_size > cut.end)
+ cut.end - fstart
+ else
+ chunk_size;
+ const amt = try self.file.preadAll(buffer[0..fsize], fstart);
+ if (amt != fsize) return error.InputOutput;
+
+ hasher.update(buffer[0..fsize]);
+ }
+ }
+
+ hasher.final(&self.uuid_cmd.uuid);
+ conformUuid(&self.uuid_cmd.uuid);
+ },
+ }
+
+ const in_file = args.uuid_cmd_offset + @sizeOf(macho.load_command);
+ try self.file.pwriteAll(&self.uuid_cmd.uuid, in_file);
+ }
+
+ inline fn conformUuid(out: *[Md5.digest_length]u8) void {
+ // LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
+ out[6] = (out[6] & 0x0F) | (3 << 4);
+ out[8] = (out[8] & 0x3F) | 0x80;
+ }
+
+ fn writeCodeSignaturePadding(self: *Zld, code_sig: *CodeSignature) !void {
const seg = self.getLinkeditSegmentPtr();
// Code signature data has to be 16-bytes aligned for Apple tools to recognize the file
// https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
@@ -3018,23 +2870,11 @@ pub const Zld = struct {
// except for code signature data.
try self.file.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
- try lc_writer.writeStruct(macho.linkedit_data_command{
- .cmd = .CODE_SIGNATURE,
- .cmdsize = @sizeOf(macho.linkedit_data_command),
- .dataoff = @intCast(u32, offset),
- .datasize = @intCast(u32, needed_size),
- });
- ncmds.* += 1;
-
- return @intCast(u32, offset);
+ self.codesig_cmd.dataoff = @intCast(u32, offset);
+ self.codesig_cmd.datasize = @intCast(u32, needed_size);
}
- fn writeCodeSignature(
- self: *Zld,
- comp: *const Compilation,
- code_sig: *CodeSignature,
- offset: u32,
- ) !void {
+ fn writeCodeSignature(self: *Zld, comp: *const Compilation, code_sig: *CodeSignature) !void {
const seg_id = self.getSegmentByName("__TEXT").?;
const seg = self.segments.items[seg_id];
@@ -3045,17 +2885,17 @@ pub const Zld = struct {
.file = self.file,
.exec_seg_base = seg.fileoff,
.exec_seg_limit = seg.filesize,
- .file_size = offset,
+ .file_size = self.codesig_cmd.dataoff,
.output_mode = self.options.output_mode,
}, buffer.writer());
assert(buffer.items.len == code_sig.size());
log.debug("writing code signature from 0x{x} to 0x{x}", .{
- offset,
- offset + buffer.items.len,
+ self.codesig_cmd.dataoff,
+ self.codesig_cmd.dataoff + buffer.items.len,
});
- try self.file.pwriteAll(buffer.items, offset);
+ try self.file.pwriteAll(buffer.items, self.codesig_cmd.dataoff);
}
/// Writes Mach-O file header.
@@ -3734,7 +3574,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
defer tracy.end();
const gpa = macho_file.base.allocator;
- const options = macho_file.base.options;
+ const options = &macho_file.base.options;
const target = options.target;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
@@ -3884,7 +3724,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
macho_file.base.file = try directory.handle.createFile(sub_path, .{
.truncate = true,
.read = true,
- .mode = link.determineMode(options),
+ .mode = link.determineMode(options.*),
});
}
var zld = Zld{
@@ -4271,12 +4111,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
}
try zld.writeAtoms(reverse_lookups);
-
- var lc_buffer = std.ArrayList(u8).init(arena);
- const lc_writer = lc_buffer.writer();
- var ncmds: u32 = 0;
-
- try zld.writeLinkeditSegmentData(&ncmds, lc_writer, reverse_lookups);
+ try zld.writeLinkeditSegmentData(reverse_lookups);
// If the last section of __DATA segment is zerofill section, we need to ensure
// that the free space between the end of the last non-zerofill section of __DATA
@@ -4301,39 +4136,12 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
}
}
- try Zld.writeDylinkerLC(&ncmds, lc_writer);
- try zld.writeMainLC(&ncmds, lc_writer);
- try zld.writeDylibIdLC(&ncmds, lc_writer);
- try zld.writeRpathLCs(&ncmds, lc_writer);
-
- {
- try lc_writer.writeStruct(macho.source_version_command{
- .cmdsize = @sizeOf(macho.source_version_command),
- .version = 0x0,
- });
- ncmds += 1;
- }
-
- try zld.writeBuildVersionLC(&ncmds, lc_writer);
-
- {
- var uuid_lc = macho.uuid_command{
- .cmdsize = @sizeOf(macho.uuid_command),
- .uuid = undefined,
- };
- std.crypto.random.bytes(&uuid_lc.uuid);
- try lc_writer.writeStruct(uuid_lc);
- ncmds += 1;
- }
-
- try zld.writeLoadDylibLCs(&ncmds, lc_writer);
-
+ // Write code signature padding if required
const requires_codesig = blk: {
if (options.entitlements) |_| break :blk true;
if (cpu_arch == .aarch64 and (os_tag == .macos or abi == .simulator)) break :blk true;
break :blk false;
};
- var codesig_offset: ?u32 = null;
var codesig: ?CodeSignature = if (requires_codesig) blk: {
// Preallocate space for the code signature.
// We need to do this at this stage so that we have the load commands with proper values
@@ -4341,24 +4149,76 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
// The most important here is to have the correct vm and filesize of the __LINKEDIT segment
// where the code signature goes into.
var codesig = CodeSignature.init(page_size);
- codesig.code_directory.ident = options.emit.?.sub_path;
+ codesig.code_directory.ident = fs.path.basename(full_out_path);
if (options.entitlements) |path| {
- try codesig.addEntitlements(gpa, path);
+ try codesig.addEntitlements(zld.gpa, path);
}
- codesig_offset = try zld.writeCodeSignaturePadding(&codesig, &ncmds, lc_writer);
+ try zld.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
- defer if (codesig) |*csig| csig.deinit(gpa);
+ defer if (codesig) |*csig| csig.deinit(zld.gpa);
+
+ // Write load commands
+ var lc_buffer = std.ArrayList(u8).init(arena);
+ const lc_writer = lc_buffer.writer();
+
+ try zld.writeSegmentHeaders(lc_writer);
+ const linkedit_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len - @sizeOf(macho.segment_command_64));
+
+ try lc_writer.writeStruct(zld.dyld_info_cmd);
+ try lc_writer.writeStruct(zld.function_starts_cmd);
+ try lc_writer.writeStruct(zld.data_in_code_cmd);
+
+ const symtab_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+ try lc_writer.writeStruct(zld.symtab_cmd);
+ try lc_writer.writeStruct(zld.dysymtab_cmd);
+
+ try load_commands.writeDylinkerLC(lc_writer);
- var headers_buf = std.ArrayList(u8).init(arena);
- try zld.writeSegmentHeaders(&ncmds, headers_buf.writer());
+ if (zld.options.output_mode == .Exe) {
+ const seg_id = zld.getSegmentByName("__TEXT").?;
+ const seg = zld.segments.items[seg_id];
+ const global = zld.getEntryPoint();
+ const sym = zld.getSymbol(global);
+ try lc_writer.writeStruct(macho.entry_point_command{
+ .entryoff = @intCast(u32, sym.n_value - seg.vmaddr),
+ .stacksize = options.stack_size_override orelse 0,
+ });
+ } else {
+ assert(zld.options.output_mode == .Lib);
+ try load_commands.writeDylibIdLC(zld.gpa, zld.options, lc_writer);
+ }
+
+ try load_commands.writeRpathLCs(zld.gpa, zld.options, lc_writer);
+ try lc_writer.writeStruct(macho.source_version_command{
+ .version = 0,
+ });
+ try load_commands.writeBuildVersionLC(zld.options, lc_writer);
+
+ const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+ try lc_writer.writeStruct(zld.uuid_cmd);
- try zld.file.pwriteAll(headers_buf.items, @sizeOf(macho.mach_header_64));
- try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64) + headers_buf.items.len);
- try zld.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len + headers_buf.items.len));
+ try load_commands.writeLoadDylibLCs(zld.dylibs.items, zld.referenced_dylibs.keys(), lc_writer);
+
+ var codesig_cmd_offset: ?u32 = null;
+ if (requires_codesig) {
+ codesig_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+ try lc_writer.writeStruct(zld.codesig_cmd);
+ }
+
+ const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
+ try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
+ try zld.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len));
+
+ try zld.writeUuid(comp, .{
+ .linkedit_cmd_offset = linkedit_cmd_offset,
+ .symtab_cmd_offset = symtab_cmd_offset,
+ .uuid_cmd_offset = uuid_cmd_offset,
+ .codesig_cmd_offset = codesig_cmd_offset,
+ });
if (codesig) |*csig| {
- try zld.writeCodeSignature(comp, csig, codesig_offset.?); // code signing always comes last
+ try zld.writeCodeSignature(comp, csig); // code signing always comes last
}
}
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 2d221463c9..5423269bf0 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -201,7 +201,10 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
const decl = mod.declPtr(decl_index);
const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope());
if (fn_map_res.found_existing) {
- try fn_map_res.value_ptr.functions.put(gpa, decl_index, out);
+ if (try fn_map_res.value_ptr.functions.fetchPut(gpa, decl_index, out)) |old_entry| {
+ gpa.free(old_entry.value.code);
+ gpa.free(old_entry.value.lineinfo);
+ }
} else {
const file = decl.getFileScope();
const arena = self.path_arena.allocator();
@@ -408,9 +411,11 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
return;
},
};
- var duped_code = try self.base.allocator.dupe(u8, code);
- errdefer self.base.allocator.free(duped_code);
- try self.data_decl_table.put(self.base.allocator, decl_index, duped_code);
+ try self.data_decl_table.ensureUnusedCapacity(self.base.allocator, 1);
+ const duped_code = try self.base.allocator.dupe(u8, code);
+ if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
+ self.base.allocator.free(old_entry.value);
+ }
return self.updateFinish(decl);
}
/// called at the end of update{Decl,Func}
@@ -743,14 +748,19 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
if (is_fn) {
var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope()).?;
var submap = symidx_and_submap.functions;
- _ = submap.swapRemove(decl_index);
+ if (submap.fetchSwapRemove(decl_index)) |removed_entry| {
+ self.base.allocator.free(removed_entry.value.code);
+ self.base.allocator.free(removed_entry.value.lineinfo);
+ }
if (submap.count() == 0) {
self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol;
self.syms_index_free_list.append(self.base.allocator, symidx_and_submap.sym_index) catch {};
submap.deinit(self.base.allocator);
}
} else {
- _ = self.data_decl_table.swapRemove(decl_index);
+ if (self.data_decl_table.fetchSwapRemove(decl_index)) |removed_entry| {
+ self.base.allocator.free(removed_entry.value);
+ }
}
if (decl.link.plan9.got_index) |i| {
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index fd708f794f..7dbd3a42ce 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -314,7 +314,7 @@ fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air {
for (air_tags) |tag, i| {
switch (tag) {
- .arg, .alloc, .ret_ptr, .const_ty => air_datas[i].ty = try air_datas[i].ty.copy(air_arena),
+ .alloc, .ret_ptr, .const_ty => air_datas[i].ty = try air_datas[i].ty.copy(air_arena),
else => {},
}
}
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 74a303b8c7..d62d5adb25 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -311,53 +311,86 @@ pub const StringTable = struct {
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
assert(options.target.ofmt == .wasm);
- if (build_options.have_llvm and options.use_llvm) {
+ if (build_options.have_llvm and options.use_llvm and options.use_lld) {
return createEmpty(allocator, options);
}
const wasm_bin = try createEmpty(allocator, options);
errdefer wasm_bin.base.destroy();
+ // We are not using LLD at this point, so ensure we set the intermediary basename
+ if (build_options.have_llvm and options.use_llvm and options.module != null) {
+ // TODO this intermediary_basename isn't enough; in the case of `zig build-exe`,
+ // we also want to put the intermediary object file in the cache while the
+ // main emit directory is the cwd.
+ wasm_bin.base.intermediary_basename = try std.fmt.allocPrint(allocator, "{s}{s}", .{
+ options.emit.?.sub_path, options.target.ofmt.fileExt(options.target.cpu.arch),
+ });
+ }
+
// TODO: read the file and keep valid parts instead of truncating
const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true });
wasm_bin.base.file = file;
wasm_bin.name = sub_path;
- // As sym_index '0' is reserved, we use it for our stack pointer symbol
- const sym_name = try wasm_bin.string_table.put(allocator, "__stack_pointer");
- const symbol = try wasm_bin.symbols.addOne(allocator);
- symbol.* = .{
- .name = sym_name,
- .tag = .global,
- .flags = 0,
- .index = 0,
- };
- const loc: SymbolLoc = .{ .file = null, .index = 0 };
- try wasm_bin.resolved_symbols.putNoClobber(allocator, loc, {});
- try wasm_bin.globals.putNoClobber(allocator, sym_name, loc);
-
- // For object files we will import the stack pointer symbol
- if (options.output_mode == .Obj) {
- symbol.setUndefined(true);
- try wasm_bin.imports.putNoClobber(
- allocator,
- .{ .file = null, .index = 0 },
- .{
- .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name),
- .name = sym_name,
- .kind = .{ .global = .{ .valtype = .i32, .mutable = true } },
- },
- );
- } else {
- symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
- const global = try wasm_bin.wasm_globals.addOne(allocator);
- global.* = .{
- .global_type = .{
- .valtype = .i32,
- .mutable = true,
- },
- .init = .{ .i32_const = 0 },
+ // create stack pointer symbol
+ {
+ const loc = try wasm_bin.createSyntheticSymbol("__stack_pointer", .global);
+ const symbol = loc.getSymbol(wasm_bin);
+ // For object files we will import the stack pointer symbol
+ if (options.output_mode == .Obj) {
+ symbol.setUndefined(true);
+ symbol.index = @intCast(u32, wasm_bin.imported_globals_count);
+ wasm_bin.imported_globals_count += 1;
+ try wasm_bin.imports.putNoClobber(
+ allocator,
+ loc,
+ .{
+ .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name),
+ .name = symbol.name,
+ .kind = .{ .global = .{ .valtype = .i32, .mutable = true } },
+ },
+ );
+ } else {
+ symbol.index = @intCast(u32, wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len);
+ symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
+ const global = try wasm_bin.wasm_globals.addOne(allocator);
+ global.* = .{
+ .global_type = .{
+ .valtype = .i32,
+ .mutable = true,
+ },
+ .init = .{ .i32_const = 0 },
+ };
+ }
+ }
+
+ // create indirect function pointer symbol
+ {
+ const loc = try wasm_bin.createSyntheticSymbol("__indirect_function_table", .table);
+ const symbol = loc.getSymbol(wasm_bin);
+ const table: std.wasm.Table = .{
+ .limits = .{ .min = 0, .max = null }, // will be overwritten during `mapFunctionTable`
+ .reftype = .funcref,
};
+ if (options.output_mode == .Obj or options.import_table) {
+ symbol.setUndefined(true);
+ symbol.index = @intCast(u32, wasm_bin.imported_tables_count);
+ wasm_bin.imported_tables_count += 1;
+ try wasm_bin.imports.put(allocator, loc, .{
+ .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name),
+ .name = symbol.name,
+ .kind = .{ .table = table },
+ });
+ } else {
+ symbol.index = @intCast(u32, wasm_bin.imported_tables_count + wasm_bin.tables.items.len);
+ try wasm_bin.tables.append(allocator, table);
+ if (options.export_table) {
+ symbol.setFlag(.WASM_SYM_EXPORTED);
+ } else {
+ symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
+ }
+ }
}
if (!options.strip and options.module != null) {
@@ -388,6 +421,22 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
return wasm;
}
+/// For a given name, creates a new global synthetic symbol.
+/// Leaves index undefined and the default flags (0).
+fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !SymbolLoc {
+ const name_offset = try wasm.string_table.put(wasm.base.allocator, name);
+ const sym_index = @intCast(u32, wasm.symbols.items.len);
+ const loc: SymbolLoc = .{ .index = sym_index, .file = null };
+ try wasm.symbols.append(wasm.base.allocator, .{
+ .name = name_offset,
+ .flags = 0,
+ .tag = tag,
+ .index = undefined,
+ });
+ try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, loc, {});
+ try wasm.globals.putNoClobber(wasm.base.allocator, name_offset, loc);
+ return loc;
+}
/// Initializes symbols and atoms for the debug sections
/// Initialization is only done when compiling Zig code.
/// When Zig is invoked as a linker instead, the atoms
@@ -613,7 +662,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
try wasm.resolved_symbols.put(wasm.base.allocator, location, {});
assert(wasm.resolved_symbols.swapRemove(existing_loc));
if (existing_sym.isUndefined()) {
- assert(wasm.undefs.swapRemove(sym_name));
+ _ = wasm.undefs.swapRemove(sym_name);
}
}
}
@@ -624,8 +673,7 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
log.debug("Resolving symbols in archives", .{});
var index: u32 = 0;
undef_loop: while (index < wasm.undefs.count()) {
- const undef_sym_loc = wasm.undefs.values()[index];
- const sym_name = undef_sym_loc.getName(wasm);
+ const sym_name = wasm.undefs.keys()[index];
for (wasm.archives.items) |archive| {
const offset = archive.toc.get(sym_name) orelse {
@@ -755,6 +803,7 @@ fn validateFeatures(
fn checkUndefinedSymbols(wasm: *const Wasm) !void {
if (wasm.base.options.output_mode == .Obj) return;
+ if (wasm.base.options.import_symbols) return;
var found_undefined_symbols = false;
for (wasm.undefs.values()) |undef| {
@@ -764,7 +813,12 @@ fn checkUndefinedSymbols(wasm: *const Wasm) !void {
const file_name = if (undef.file) |file_index| name: {
break :name wasm.objects.items[file_index].name;
} else wasm.name;
- log.err("could not resolve undefined symbol '{s}'", .{undef.getName(wasm)});
+ const import_name = if (undef.file) |file_index| name: {
+ const obj = wasm.objects.items[file_index];
+ const name_index = obj.findImport(symbol.tag.externalType(), symbol.index).name;
+ break :name obj.string_table.get(name_index);
+ } else wasm.string_table.get(wasm.imports.get(undef).?.name);
+ log.err("could not resolve undefined symbol '{s}'", .{import_name});
log.err(" defined in '{s}'", .{file_name});
}
}
@@ -1237,7 +1291,7 @@ pub fn updateDeclExports(
const existing_sym: Symbol = existing_loc.getSymbol(wasm).*;
const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak;
- // When both the to-bo-exported symbol and the already existing symbol
+ // When both the to-be-exported symbol and the already existing symbol
// are strong symbols, we have a linker error.
// In the other case we replace one with the other.
if (!exp_is_weak and !existing_sym.isWeak()) {
@@ -1349,6 +1403,19 @@ fn mapFunctionTable(wasm: *Wasm) void {
while (it.next()) |value_ptr| : (index += 1) {
value_ptr.* = index;
}
+
+ if (wasm.base.options.import_table or wasm.base.options.output_mode == .Obj) {
+ const sym_loc = wasm.globals.get(wasm.string_table.getOffset("__indirect_function_table").?).?;
+ const import = wasm.imports.getPtr(sym_loc).?;
+ import.kind.table.limits.min = index - 1; // we start at index 1.
+ } else if (index > 1) {
+ log.debug("Appending indirect function table", .{});
+ const offset = wasm.string_table.getOffset("__indirect_function_table").?;
+ const sym_with_loc = wasm.globals.get(offset).?;
+ const symbol = sym_with_loc.getSymbol(wasm);
+ const table = &wasm.tables.items[symbol.index - wasm.imported_tables_count];
+ table.limits = .{ .min = index, .max = index };
+ }
}
/// Either creates a new import, or updates one if existing.
@@ -1368,18 +1435,31 @@ pub fn addOrUpdateImport(
type_index: ?u32,
) !void {
assert(symbol_index != 0);
- // For the import name itwasm, we use the decl's name, rather than the fully qualified name
- const decl_name_index = try wasm.string_table.put(wasm.base.allocator, name);
+ // For the import name, we use the decl's name, rather than the fully qualified name
+ // Also mangle the name when the lib name is set and not equal to "C" so imports with the same
+ // name but different module can be resolved correctly.
+ const mangle_name = lib_name != null and
+ !std.mem.eql(u8, std.mem.sliceTo(lib_name.?, 0), "c");
+ const full_name = if (mangle_name) full_name: {
+ break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? });
+ } else name;
+ defer if (mangle_name) wasm.base.allocator.free(full_name);
+
+ const decl_name_index = try wasm.string_table.put(wasm.base.allocator, full_name);
const symbol: *Symbol = &wasm.symbols.items[symbol_index];
symbol.setUndefined(true);
symbol.setGlobal(true);
symbol.name = decl_name_index;
+ if (mangle_name) {
+ // we specified a specific name for the symbol that does not match the import name
+ symbol.setFlag(.WASM_SYM_EXPLICIT_NAME);
+ }
const global_gop = try wasm.globals.getOrPut(wasm.base.allocator, decl_name_index);
if (!global_gop.found_existing) {
const loc: SymbolLoc = .{ .file = null, .index = symbol_index };
global_gop.value_ptr.* = loc;
try wasm.resolved_symbols.put(wasm.base.allocator, loc, {});
- try wasm.undefs.putNoClobber(wasm.base.allocator, name, loc);
+ try wasm.undefs.putNoClobber(wasm.base.allocator, full_name, loc);
}
if (type_index) |ty_index| {
@@ -1390,7 +1470,7 @@ pub fn addOrUpdateImport(
if (!gop.found_existing) {
gop.value_ptr.* = .{
.module_name = try wasm.string_table.put(wasm.base.allocator, module_name),
- .name = decl_name_index,
+ .name = try wasm.string_table.put(wasm.base.allocator, name),
.kind = .{ .function = ty_index },
};
}
@@ -1556,9 +1636,13 @@ fn allocateAtoms(wasm: *Wasm) !void {
var atom: *Atom = entry.value_ptr.*.getFirst();
var offset: u32 = 0;
while (true) {
+ const symbol_loc = atom.symbolLoc();
+ if (!wasm.resolved_symbols.contains(symbol_loc)) {
+ atom = atom.next orelse break;
+ continue;
+ }
offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment);
atom.offset = offset;
- const symbol_loc = atom.symbolLoc();
log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
symbol_loc.getName(wasm),
offset,
@@ -1566,7 +1650,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
atom.size,
});
offset += atom.size;
- try wasm.symbol_atom.put(wasm.base.allocator, atom.symbolLoc(), atom); // Update atom pointers
+ try wasm.symbol_atom.put(wasm.base.allocator, symbol_loc, atom); // Update atom pointers
atom = atom.next orelse break;
}
segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
@@ -1684,17 +1768,6 @@ fn setupImports(wasm: *Wasm) !void {
/// Takes the global, function and table section from each linked object file
/// and merges it into a single section for each.
fn mergeSections(wasm: *Wasm) !void {
- // append the indirect function table if initialized
- if (wasm.string_table.getOffset("__indirect_function_table")) |offset| {
- const sym_loc = wasm.globals.get(offset).?;
- const table: std.wasm.Table = .{
- .limits = .{ .min = @intCast(u32, wasm.function_table.count()), .max = null },
- .reftype = .funcref,
- };
- sym_loc.getSymbol(wasm).index = @intCast(u32, wasm.tables.items.len) + wasm.imported_tables_count;
- try wasm.tables.append(wasm.base.allocator, table);
- }
-
for (wasm.resolved_symbols.keys()) |sym_loc| {
if (sym_loc.file == null) {
// Zig code-generated symbols are already within the sections and do not
@@ -1702,7 +1775,7 @@ fn mergeSections(wasm: *Wasm) !void {
continue;
}
- const object = wasm.objects.items[sym_loc.file.?];
+ const object = &wasm.objects.items[sym_loc.file.?];
const symbol = &object.symtable[sym_loc.index];
if (symbol.isUndefined() or (symbol.tag != .function and symbol.tag != .global and symbol.tag != .table)) {
// Skip undefined symbols as they go in the `import` section
@@ -1714,13 +1787,12 @@ fn mergeSections(wasm: *Wasm) !void {
const index = symbol.index - offset;
switch (symbol.tag) {
.function => {
- const original_func = object.functions[index];
const gop = try wasm.functions.getOrPut(
wasm.base.allocator,
.{ .file = sym_loc.file, .index = symbol.index },
);
if (!gop.found_existing) {
- gop.value_ptr.* = original_func;
+ gop.value_ptr.* = object.functions[index];
}
symbol.index = @intCast(u32, gop.index) + wasm.imported_functions_count;
},
@@ -1768,7 +1840,7 @@ fn mergeTypes(wasm: *Wasm) !void {
if (symbol.isUndefined()) {
log.debug("Adding type from extern function '{s}'", .{sym_loc.getName(wasm)});
- const import: *types.Import = wasm.imports.getPtr(sym_loc).?;
+ const import: *types.Import = wasm.imports.getPtr(sym_loc) orelse continue;
const original_type = object.func_types[import.kind.function];
import.kind.function = try wasm.putOrGetFuncType(original_type);
} else if (!dirty.contains(symbol.index)) {
@@ -1785,16 +1857,56 @@ fn setupExports(wasm: *Wasm) !void {
if (wasm.base.options.output_mode == .Obj) return;
log.debug("Building exports from symbols", .{});
+ const force_exp_names = wasm.base.options.export_symbol_names;
+ if (force_exp_names.len > 0) {
+ var failed_exports = try std.ArrayList([]const u8).initCapacity(wasm.base.allocator, force_exp_names.len);
+ defer failed_exports.deinit();
+
+ for (force_exp_names) |exp_name| {
+ const name_index = wasm.string_table.getOffset(exp_name) orelse {
+ failed_exports.appendAssumeCapacity(exp_name);
+ continue;
+ };
+ const loc = wasm.globals.get(name_index) orelse {
+ failed_exports.appendAssumeCapacity(exp_name);
+ continue;
+ };
+
+ const symbol = loc.getSymbol(wasm);
+ symbol.setFlag(.WASM_SYM_EXPORTED);
+ }
+
+ if (failed_exports.items.len > 0) {
+ for (failed_exports.items) |exp_name| {
+ log.err("could not export '{s}', symbol not found", .{exp_name});
+ }
+ return error.MissingSymbol;
+ }
+ }
+
for (wasm.resolved_symbols.keys()) |sym_loc| {
const symbol = sym_loc.getSymbol(wasm);
- if (!symbol.isExported()) continue;
+ if (!symbol.isExported(wasm.base.options.rdynamic)) continue;
const sym_name = sym_loc.getName(wasm);
const export_name = if (wasm.export_names.get(sym_loc)) |name| name else blk: {
if (sym_loc.file == null) break :blk symbol.name;
break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
};
- const exp: types.Export = .{
+ const exp: types.Export = if (symbol.tag == .data) exp: {
+ const atom = wasm.symbol_atom.get(sym_loc).?;
+ const va = atom.getVA(wasm, symbol);
+ const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
+ try wasm.wasm_globals.append(wasm.base.allocator, .{
+ .global_type = .{ .valtype = .i32, .mutable = false },
+ .init = .{ .i32_const = @intCast(i32, va) },
+ });
+ break :exp .{
+ .name = export_name,
+ .kind = .global,
+ .index = global_index,
+ };
+ } else .{
.name = export_name,
.kind = symbol.tag.externalType(),
.index = symbol.index,
@@ -2134,7 +2246,6 @@ pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !
const new_index = @intCast(u32, wasm.segments.items.len);
index.* = new_index;
try wasm.appendDummySegment();
- // _ = index;
const sym_index = wasm.symbols_free_list.popOrNull() orelse idx: {
const tmp_index = @intCast(u32, wasm.symbols.items.len);
@@ -2202,13 +2313,214 @@ pub fn flush(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) lin
}
return;
}
+
if (build_options.have_llvm and wasm.base.options.use_lld) {
return wasm.linkWithLLD(comp, prog_node);
+ } else if (build_options.have_llvm and wasm.base.options.use_llvm and !wasm.base.options.use_lld) {
+ return wasm.linkWithZld(comp, prog_node);
} else {
return wasm.flushModule(comp, prog_node);
}
}
+/// Uses the in-house linker to link one or multiple object- and archive files into a WebAssembly binary.
+fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const gpa = wasm.base.allocator;
+ const options = wasm.base.options;
+
+ // Used for all temporary memory allocated during flushing
+ var arena_instance = std.heap.ArenaAllocator.init(gpa);
+ defer arena_instance.deinit();
+ const arena = arena_instance.allocator();
+
+ const directory = options.emit.?.directory; // Just an alias to make it shorter to type.
+ const full_out_path = try directory.join(arena, &[_][]const u8{options.emit.?.sub_path});
+
+ // If there is no Zig code to compile, then we should skip flushing the output file because it
+ // will not be part of the linker line anyway.
+ const module_obj_path: ?[]const u8 = if (options.module != null) blk: {
+ assert(options.use_llvm); // `linkWithZld` should never be called when the Wasm backend is used
+ try wasm.flushModule(comp, prog_node);
+
+ if (fs.path.dirname(full_out_path)) |dirname| {
+ break :blk try fs.path.join(arena, &.{ dirname, wasm.base.intermediary_basename.? });
+ } else {
+ break :blk wasm.base.intermediary_basename.?;
+ }
+ } else null;
+
+ var sub_prog_node = prog_node.start("Wasm Flush", 0);
+ sub_prog_node.activate();
+ defer sub_prog_node.end();
+
+ const is_obj = options.output_mode == .Obj;
+ const compiler_rt_path: ?[]const u8 = if (options.include_compiler_rt and !is_obj)
+ comp.compiler_rt_lib.?.full_object_path
+ else
+ null;
+ const id_symlink_basename = "zld.id";
+
+ var man: Cache.Manifest = undefined;
+ defer if (!options.disable_lld_caching) man.deinit();
+ var digest: [Cache.hex_digest_len]u8 = undefined;
+
+ // NOTE: The following section must be kept in sync
+ // with the equivalent section defined in `linkWithLLD`
+ if (!options.disable_lld_caching) {
+ man = comp.cache_parent.obtain();
+
+ // We are about to obtain this lock, so here we give other processes a chance first.
+ wasm.base.releaseLock();
+
+ comptime assert(Compilation.link_hash_implementation_version == 7);
+
+ for (options.objects) |obj| {
+ _ = try man.addFile(obj.path, null);
+ man.hash.add(obj.must_link);
+ }
+ for (comp.c_object_table.keys()) |key| {
+ _ = try man.addFile(key.status.success.object_path, null);
+ }
+ try man.addOptionalFile(module_obj_path);
+ try man.addOptionalFile(compiler_rt_path);
+ man.hash.addOptionalBytes(options.entry);
+ man.hash.addOptional(options.stack_size_override);
+ man.hash.add(options.import_memory);
+ man.hash.add(options.import_table);
+ man.hash.add(options.export_table);
+ man.hash.addOptional(options.initial_memory);
+ man.hash.addOptional(options.max_memory);
+ man.hash.add(options.shared_memory);
+ man.hash.addOptional(options.global_base);
+ man.hash.add(options.export_symbol_names.len);
+ // strip does not need to go into the linker hash because it is part of the hash namespace
+ for (options.export_symbol_names) |symbol_name| {
+ man.hash.addBytes(symbol_name);
+ }
+
+ // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
+ _ = try man.hit();
+ digest = man.final();
+
+ var prev_digest_buf: [digest.len]u8 = undefined;
+ const prev_digest: []u8 = Cache.readSmallFile(
+ directory.handle,
+ id_symlink_basename,
+ &prev_digest_buf,
+ ) catch |err| blk: {
+ log.debug("WASM LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
+ // Handle this as a cache miss.
+ break :blk prev_digest_buf[0..0];
+ };
+ if (mem.eql(u8, prev_digest, &digest)) {
+ log.debug("WASM LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
+ // Hot diggity dog! The output binary is already there.
+ wasm.base.lock = man.toOwnedLock();
+ return;
+ }
+ log.debug("WASM LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
+
+ // We are about to change the output file to be different, so we invalidate the build hash now.
+ directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| return e,
+ };
+ }
+
+ // Positional arguments to the linker such as object files and static archives.
+ var positionals = std.ArrayList([]const u8).init(arena);
+ try positionals.ensureUnusedCapacity(options.objects.len);
+
+ // When the target os is WASI, we allow linking with WASI-LIBC
+ if (options.target.os.tag == .wasi) {
+ const is_exe_or_dyn_lib = wasm.base.options.output_mode == .Exe or
+ (wasm.base.options.output_mode == .Lib and wasm.base.options.link_mode == .Dynamic);
+ if (is_exe_or_dyn_lib) {
+ const wasi_emulated_libs = wasm.base.options.wasi_emulated_libs;
+ for (wasi_emulated_libs) |crt_file| {
+ try positionals.append(try comp.get_libc_crt_file(
+ arena,
+ wasi_libc.emulatedLibCRFileLibName(crt_file),
+ ));
+ }
+
+ if (wasm.base.options.link_libc) {
+ try positionals.append(try comp.get_libc_crt_file(
+ arena,
+ wasi_libc.execModelCrtFileFullName(wasm.base.options.wasi_exec_model),
+ ));
+ try positionals.append(try comp.get_libc_crt_file(arena, "libc.a"));
+ }
+
+ if (wasm.base.options.link_libcpp) {
+ try positionals.append(comp.libcxx_static_lib.?.full_object_path);
+ try positionals.append(comp.libcxxabi_static_lib.?.full_object_path);
+ }
+ }
+ }
+
+ if (module_obj_path) |path| {
+ try positionals.append(path);
+ }
+
+ for (options.objects) |object| {
+ try positionals.append(object.path);
+ }
+
+ for (comp.c_object_table.keys()) |c_object| {
+ try positionals.append(c_object.status.success.object_path);
+ }
+
+ if (comp.compiler_rt_lib) |lib| {
+ try positionals.append(lib.full_object_path);
+ }
+
+ try wasm.parseInputFiles(positionals.items);
+
+ for (wasm.objects.items) |_, object_index| {
+ try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
+ }
+
+ var emit_features_count: u32 = 0;
+ var enabled_features: [@typeInfo(types.Feature.Tag).Enum.fields.len]bool = undefined;
+ try wasm.validateFeatures(&enabled_features, &emit_features_count);
+ try wasm.resolveSymbolsInArchives();
+ try wasm.checkUndefinedSymbols();
+
+ try wasm.setupStart();
+ try wasm.setupImports();
+
+ for (wasm.objects.items) |*object, object_index| {
+ try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm);
+ }
+
+ try wasm.allocateAtoms();
+ try wasm.setupMemory();
+ wasm.mapFunctionTable();
+ try wasm.mergeSections();
+ try wasm.mergeTypes();
+ try wasm.setupExports();
+ try wasm.writeToFile(enabled_features, emit_features_count, arena);
+
+ if (!wasm.base.options.disable_lld_caching) {
+ // Update the file with the digest. If it fails we can continue; it only
+ // means that the next invocation will have an unnecessary cache miss.
+ Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
+ log.warn("failed to save linking hash digest symlink: {s}", .{@errorName(err)});
+ };
+ // Again failure here only means an unnecessary cache miss.
+ man.writeManifest() catch |err| {
+ log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
+ };
+ // We hang on to this lock so that the output file path can be used without
+ // other processes clobbering it.
+ wasm.base.lock = man.toOwnedLock();
+ }
+}
+
pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -2219,20 +2531,13 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
- var sub_prog_node = prog_node.start("WASM Flush", 0);
+ var sub_prog_node = prog_node.start("Wasm Flush", 0);
sub_prog_node.activate();
defer sub_prog_node.end();
// ensure the error names table is populated when an error name is referenced
try wasm.populateErrorNameTable();
- // The amount of sections that will be written
- var section_count: u32 = 0;
- // Index of the code section. Used to tell relocation table where the section lives.
- var code_section_index: ?u32 = null;
- // Index of the data section. Used to tell relocation table where the section lives.
- var data_section_index: ?u32 = null;
-
// Used for all temporary memory allocated during flushin
var arena_instance = std.heap.ArenaAllocator.init(wasm.base.allocator);
defer arena_instance.deinit();
@@ -2312,9 +2617,25 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.mergeSections();
try wasm.mergeTypes();
try wasm.setupExports();
+ try wasm.writeToFile(enabled_features, emit_features_count, arena);
+}
+/// Writes the WebAssembly in-memory module to the file
+fn writeToFile(
+ wasm: *Wasm,
+ enabled_features: [@typeInfo(types.Feature.Tag).Enum.fields.len]bool,
+ feature_count: u32,
+ arena: Allocator,
+) !void {
+ // Size of each section header
const header_size = 5 + 1;
- const is_obj = wasm.base.options.output_mode == .Obj;
+ // The amount of sections that will be written
+ var section_count: u32 = 0;
+ // Index of the code section. Used to tell relocation table where the section lives.
+ var code_section_index: ?u32 = null;
+ // Index of the data section. Used to tell relocation table where the section lives.
+ var data_section_index: ?u32 = null;
+ const is_obj = wasm.base.options.output_mode == .Obj or (!wasm.base.options.use_llvm and wasm.base.options.use_lld);
var binary_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer binary_bytes.deinit();
@@ -2355,28 +2676,9 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
// Import section
const import_memory = wasm.base.options.import_memory or is_obj;
- const import_table = wasm.base.options.import_table or is_obj;
- if (wasm.imports.count() != 0 or import_memory or import_table) {
+ if (wasm.imports.count() != 0 or import_memory) {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
- // import table is always first table so emit that first
- if (import_table) {
- const table_imp: types.Import = .{
- .module_name = try wasm.string_table.put(wasm.base.allocator, wasm.host_name),
- .name = try wasm.string_table.put(wasm.base.allocator, "__indirect_function_table"),
- .kind = .{
- .table = .{
- .limits = .{
- .min = @intCast(u32, wasm.function_table.count()),
- .max = null,
- },
- .reftype = .funcref,
- },
- },
- };
- try wasm.emitImport(binary_writer, table_imp);
- }
-
var it = wasm.imports.iterator();
while (it.next()) |entry| {
assert(entry.key_ptr.*.getSymbol(wasm).isUndefined());
@@ -2399,7 +2701,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
header_offset,
.import,
@intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.imports.count() + @boolToInt(import_memory) + @boolToInt(import_table)),
+ @intCast(u32, wasm.imports.count() + @boolToInt(import_memory)),
);
section_count += 1;
}
@@ -2422,22 +2724,20 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
// Table section
- const export_table = wasm.base.options.export_table;
- if (!import_table and wasm.function_table.count() != 0) {
+ if (wasm.tables.items.len > 0) {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
- try leb.writeULEB128(binary_writer, std.wasm.reftype(.funcref));
- try emitLimits(binary_writer, .{
- .min = @intCast(u32, wasm.function_table.count()) + 1,
- .max = null,
- });
+ for (wasm.tables.items) |table| {
+ try leb.writeULEB128(binary_writer, std.wasm.reftype(table.reftype));
+ try emitLimits(binary_writer, table.limits);
+ }
try writeVecSectionHeader(
binary_bytes.items,
header_offset,
.table,
@intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @as(u32, 1),
+ @intCast(u32, wasm.tables.items.len),
);
section_count += 1;
}
@@ -2478,7 +2778,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
// Export section
- if (wasm.exports.items.len != 0 or export_table or !import_memory) {
+ if (wasm.exports.items.len != 0 or !import_memory) {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
for (wasm.exports.items) |exp| {
@@ -2489,13 +2789,6 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try leb.writeULEB128(binary_writer, exp.index);
}
- if (export_table) {
- try leb.writeULEB128(binary_writer, @intCast(u32, "__indirect_function_table".len));
- try binary_writer.writeAll("__indirect_function_table");
- try binary_writer.writeByte(std.wasm.externalKind(.table));
- try leb.writeULEB128(binary_writer, @as(u32, 0)); // function table is always the first table
- }
-
if (!import_memory) {
try leb.writeULEB128(binary_writer, @intCast(u32, "memory".len));
try binary_writer.writeAll("memory");
@@ -2508,7 +2801,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
header_offset,
.@"export",
@intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.exports.items.len) + @boolToInt(export_table) + @boolToInt(!import_memory),
+ @intCast(u32, wasm.exports.items.len) + @boolToInt(!import_memory),
);
section_count += 1;
}
@@ -2517,11 +2810,18 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
if (wasm.function_table.count() > 0) {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
- var flags: u32 = 0x2; // Yes we have a table
+ const table_loc = wasm.globals.get(wasm.string_table.getOffset("__indirect_function_table").?).?;
+ const table_sym = table_loc.getSymbol(wasm);
+
+ var flags: u32 = if (table_sym.index == 0) 0x0 else 0x02; // passive with implicit 0-index table or set table index manually
try leb.writeULEB128(binary_writer, flags);
- try leb.writeULEB128(binary_writer, @as(u32, 0)); // index of that table. TODO: Store synthetic symbols
+ if (flags == 0x02) {
+ try leb.writeULEB128(binary_writer, table_sym.index);
+ }
try emitInit(binary_writer, .{ .i32_const = 1 }); // We start at index 1, so unresolved function pointers are invalid
- try leb.writeULEB128(binary_writer, @as(u8, 0));
+ if (flags == 0x02) {
+ try leb.writeULEB128(binary_writer, @as(u8, 0)); // represents funcref
+ }
try leb.writeULEB128(binary_writer, @intCast(u32, wasm.function_table.count()));
var symbol_it = wasm.function_table.keyIterator();
while (symbol_it.next()) |symbol_loc_ptr| {
@@ -2708,8 +3008,8 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
try emitProducerSection(&binary_bytes);
- if (emit_features_count > 0) {
- try emitFeaturesSection(&binary_bytes, &enabled_features, emit_features_count);
+ if (feature_count > 0) {
+ try emitFeaturesSection(&binary_bytes, &enabled_features, feature_count);
}
}
@@ -2848,9 +3148,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
for (wasm.resolved_symbols.keys()) |sym_loc| {
const symbol = sym_loc.getSymbol(wasm).*;
- const name = if (symbol.isUndefined()) blk: {
- break :blk wasm.string_table.get(wasm.imports.get(sym_loc).?.name);
- } else sym_loc.getName(wasm);
+ const name = sym_loc.getName(wasm);
switch (symbol.tag) {
.function => {
const gop = funcs.getOrPutAssumeCapacity(symbol.index);
@@ -3161,39 +3459,14 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
try argv.append("--stack-first");
}
- var auto_export_symbols = true;
// Users are allowed to specify which symbols they want to export to the wasm host.
for (wasm.base.options.export_symbol_names) |symbol_name| {
const arg = try std.fmt.allocPrint(arena, "--export={s}", .{symbol_name});
try argv.append(arg);
- auto_export_symbols = false;
}
if (wasm.base.options.rdynamic) {
try argv.append("--export-dynamic");
- auto_export_symbols = false;
- }
-
- if (auto_export_symbols) {
- if (wasm.base.options.module) |mod| {
- // when we use stage1, we use the exports that stage1 provided us.
- // For stage2, we can directly retrieve them from the module.
- const skip_export_non_fn = target.os.tag == .wasi and
- wasm.base.options.wasi_exec_model == .command;
- for (mod.decl_exports.values()) |exports| {
- for (exports.items) |exprt| {
- const exported_decl = mod.declPtr(exprt.exported_decl);
- if (skip_export_non_fn and exported_decl.ty.zigTypeTag() != .Fn) {
- // skip exporting symbols when we're building a WASI command
- // and the symbol is not a function
- continue;
- }
- const symbol_name = exported_decl.name;
- const arg = try std.fmt.allocPrint(arena, "--export={s}", .{symbol_name});
- try argv.append(arg);
- }
- }
- }
}
if (wasm.base.options.entry) |entry| {
@@ -3212,12 +3485,20 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
if (wasm.base.options.wasi_exec_model == .reactor) {
// Reactor execution model does not have _start so lld doesn't look for it.
try argv.append("--no-entry");
+ // Make sure "_initialize" and other user-defined functions are exported if this is WASI reactor.
+ // If rdynamic is true, it will already be appended, so only verify if the user did not specify
+ // the flag, in which case we ensure `--export-dynamic` is called.
+ if (!wasm.base.options.rdynamic) {
+ try argv.append("--export-dynamic");
+ }
}
} else if (wasm.base.options.entry == null) {
try argv.append("--no-entry"); // So lld doesn't look for _start.
}
+ if (wasm.base.options.import_symbols) {
+ try argv.append("--allow-undefined");
+ }
try argv.appendSlice(&[_][]const u8{
- "--allow-undefined",
"-o",
full_out_path,
});
@@ -3607,15 +3888,23 @@ fn emitDataRelocations(
try writeCustomSectionHeader(binary_bytes.items, header_offset, size);
}
-/// Searches for an a matching function signature, when not found
-/// a new entry will be made. The index of the existing/new signature will be returned.
-pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 {
+pub fn getTypeIndex(wasm: *const Wasm, func_type: std.wasm.Type) ?u32 {
var index: u32 = 0;
while (index < wasm.func_types.items.len) : (index += 1) {
if (wasm.func_types.items[index].eql(func_type)) return index;
}
+ return null;
+}
+
+/// Searches for a matching function signature, when not found
+/// a new entry will be made. The index of the existing/new signature will be returned.
+pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 {
+ if (wasm.getTypeIndex(func_type)) |index| {
+ return index;
+ }
// functype does not exist.
+ const index = @intCast(u32, wasm.func_types.items.len);
const params = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.params);
errdefer wasm.base.allocator.free(params);
const returns = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.returns);
diff --git a/src/link/Wasm/Archive.zig b/src/link/Wasm/Archive.zig
index 2fa1e07915..c4fb9b8291 100644
--- a/src/link/Wasm/Archive.zig
+++ b/src/link/Wasm/Archive.zig
@@ -157,13 +157,12 @@ fn parseTableOfContents(archive: *Archive, allocator: Allocator, reader: anytype
};
var i: usize = 0;
- while (i < sym_tab.len) {
- const string = mem.sliceTo(sym_tab[i..], 0);
- if (string.len == 0) {
- i += 1;
- continue;
- }
- i += string.len;
+ var pos: usize = 0;
+ while (i < num_symbols) : (i += 1) {
+ const string = mem.sliceTo(sym_tab[pos..], 0);
+ pos += string.len + 1;
+ if (string.len == 0) continue;
+
const name = try allocator.dupe(u8, string);
errdefer allocator.free(name);
const gop = try archive.toc.getOrPut(allocator, name);
@@ -172,7 +171,7 @@ fn parseTableOfContents(archive: *Archive, allocator: Allocator, reader: anytype
} else {
gop.value_ptr.* = .{};
}
- try gop.value_ptr.append(allocator, symbol_positions[gop.index]);
+ try gop.value_ptr.append(allocator, symbol_positions[i]);
}
}
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index 4c1da180bd..de9cefebdc 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -90,24 +90,26 @@ pub fn getFirst(atom: *Atom) *Atom {
return tmp;
}
-/// Unlike `getFirst` this returns the first `*Atom` that was
-/// produced from Zig code, rather than an object file.
-/// This is useful for debug sections where we want to extend
-/// the bytes, and don't want to overwrite existing Atoms.
-pub fn getFirstZigAtom(atom: *Atom) *Atom {
- if (atom.file == null) return atom;
- var tmp = atom;
- return while (tmp.prev) |prev| {
- if (prev.file == null) break prev;
- tmp = prev;
- } else unreachable; // must allocate an Atom first!
-}
-
/// Returns the location of the symbol that represents this `Atom`
pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
return .{ .file = atom.file, .index = atom.sym_index };
}
+/// Returns the virtual address of the `Atom`. This is the address starting
+/// from the first entry within a section.
+pub fn getVA(atom: Atom, wasm: *const Wasm, symbol: *const Symbol) u32 {
+ if (symbol.tag == .function) return atom.offset;
+ std.debug.assert(symbol.tag == .data);
+ const merge_segment = wasm.base.options.output_mode != .Obj;
+ const segment_info = if (atom.file) |object_index| blk: {
+ break :blk wasm.objects.items[object_index].segment_info;
+ } else wasm.segment_info.values();
+ const segment_name = segment_info[symbol.index].outputName(merge_segment);
+ const segment_index = wasm.data_segments.get(segment_name).?;
+ const segment = wasm.segments.items[segment_index];
+ return segment.offset + atom.offset;
+}
+
/// Resolves the relocations within the atom, writing the new value
/// at the calculated offset.
pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
@@ -159,7 +161,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
/// The final value must be casted to the correct size.
fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wasm) u64 {
const target_loc = (Wasm.SymbolLoc{ .file = atom.file, .index = relocation.index }).finalLoc(wasm_bin);
- const symbol = target_loc.getSymbol(wasm_bin).*;
+ const symbol = target_loc.getSymbol(wasm_bin);
switch (relocation.relocation_type) {
.R_WASM_FUNCTION_INDEX_LEB => return symbol.index,
.R_WASM_TABLE_NUMBER_LEB => return symbol.index,
@@ -168,12 +170,13 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
.R_WASM_TABLE_INDEX_SLEB,
.R_WASM_TABLE_INDEX_SLEB64,
=> return wasm_bin.function_table.get(target_loc) orelse 0,
- .R_WASM_TYPE_INDEX_LEB => return blk: {
- if (symbol.isUndefined()) {
- const imp = wasm_bin.imports.get(target_loc).?;
- break :blk imp.kind.function;
- }
- break :blk wasm_bin.functions.values()[symbol.index - wasm_bin.imported_functions_count].type_index;
+ .R_WASM_TYPE_INDEX_LEB => {
+ const file_index = atom.file orelse {
+ return relocation.index;
+ };
+
+ const original_type = wasm_bin.objects.items[file_index].func_types[relocation.index];
+ return wasm_bin.getTypeIndex(original_type).?;
},
.R_WASM_GLOBAL_INDEX_I32,
.R_WASM_GLOBAL_INDEX_LEB,
@@ -185,17 +188,13 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
.R_WASM_MEMORY_ADDR_SLEB,
.R_WASM_MEMORY_ADDR_SLEB64,
=> {
- std.debug.assert(symbol.tag == .data and !symbol.isUndefined());
- const merge_segment = wasm_bin.base.options.output_mode != .Obj;
+ std.debug.assert(symbol.tag == .data);
+ if (symbol.isUndefined()) {
+ return 0;
+ }
const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
- const segment_info = if (target_atom.file) |object_index| blk: {
- break :blk wasm_bin.objects.items[object_index].segment_info;
- } else wasm_bin.segment_info.values();
- const segment_name = segment_info[symbol.index].outputName(merge_segment);
- const segment_index = wasm_bin.data_segments.get(segment_name).?;
- const segment = wasm_bin.segments.items[segment_index];
- const rel_value = @intCast(i32, target_atom.offset + segment.offset) + relocation.addend;
- return @intCast(u32, rel_value);
+ const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
+ return @intCast(u32, va + relocation.addend);
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32 => {
@@ -204,18 +203,10 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
return @intCast(u32, rel_value);
},
.R_WASM_FUNCTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
- var current_atom = target_atom.getFirst();
- var offset: u32 = 0;
- // TODO: Calculate this during atom allocation, rather than
- // this linear calculation. For now it's done here as atoms
- // are being sorted after atom allocation, as functions aren't
- // merged until later.
- while (true) {
- offset += 5; // each atom uses 5 bytes to store its body's size
- if (current_atom == target_atom) break;
- current_atom = current_atom.next.?;
- }
+ const target_atom = wasm_bin.symbol_atom.get(target_loc) orelse {
+ return @bitCast(u32, @as(i32, -1));
+ };
+ const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
return @intCast(u32, rel_value);
},
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index e5947228a5..8f49d68712 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -923,7 +923,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
try atom.relocs.append(gpa, reloc);
if (relocation.isTableIndex()) {
- try wasm_bin.function_table.putNoClobber(gpa, .{
+ try wasm_bin.function_table.put(gpa, .{
.file = object_index,
.index = relocation.index,
}, 0);
@@ -938,17 +938,17 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
.index = relocatable_data.getIndex(),
})) |symbols| {
atom.sym_index = symbols.pop();
+ try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
// symbols referencing the same atom will be added as alias
// or as 'parent' when they are global.
while (symbols.popOrNull()) |idx| {
+ try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom);
const alias_symbol = object.symtable[idx];
- const symbol = object.symtable[atom.sym_index];
- if (alias_symbol.isGlobal() and symbol.isLocal()) {
+ if (alias_symbol.isGlobal()) {
atom.sym_index = idx;
}
}
- try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
}
const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
diff --git a/src/link/Wasm/Symbol.zig b/src/link/Wasm/Symbol.zig
index d857e6de44..089eee289e 100644
--- a/src/link/Wasm/Symbol.zig
+++ b/src/link/Wasm/Symbol.zig
@@ -38,7 +38,7 @@ pub const Tag = enum {
return switch (tag) {
.function => .function,
.global => .global,
- .data => .memory,
+ .data => unreachable, // Data symbols will generate a global
.section => unreachable, // Not an external type
.event => unreachable, // Not an external type
.dead => unreachable, // Dead symbols should not be referenced
@@ -139,12 +139,10 @@ pub fn isNoStrip(symbol: Symbol) bool {
return symbol.flags & @enumToInt(Flag.WASM_SYM_NO_STRIP) != 0;
}
-pub fn isExported(symbol: Symbol) bool {
+pub fn isExported(symbol: Symbol, is_dynamic: bool) bool {
if (symbol.isUndefined() or symbol.isLocal()) return false;
- if (symbol.isHidden()) return false;
- if (symbol.hasFlag(.WASM_SYM_EXPORTED)) return true;
- if (symbol.hasFlag(.WASM_SYM_BINDING_WEAK)) return false;
- return true;
+ if (is_dynamic and symbol.isVisible()) return true;
+ return symbol.hasFlag(.WASM_SYM_EXPORTED);
}
pub fn isWeak(symbol: Symbol) bool {
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 9039d3b4f1..748f1c138f 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -339,7 +339,7 @@ pub const Yaml = struct {
if (union_info.tag_type) |_| {
inline for (union_info.fields) |field| {
- if (self.parseValue(field.field_type, value)) |u_value| {
+ if (self.parseValue(field.type, value)) |u_value| {
return @unionInit(T, field.name, u_value);
} else |err| {
if (@as(@TypeOf(err) || error{TypeMismatch}, err) != error.TypeMismatch) return err;
@@ -366,16 +366,16 @@ pub const Yaml = struct {
break :blk map.get(field_name);
};
- if (@typeInfo(field.field_type) == .Optional) {
- @field(parsed, field.name) = try self.parseOptional(field.field_type, value);
+ if (@typeInfo(field.type) == .Optional) {
+ @field(parsed, field.name) = try self.parseOptional(field.type, value);
continue;
}
const unwrapped = value orelse {
- log.debug("missing struct field: {s}: {s}", .{ field.name, @typeName(field.field_type) });
+ log.debug("missing struct field: {s}: {s}", .{ field.name, @typeName(field.type) });
return error.StructFieldMissing;
};
- @field(parsed, field.name) = try self.parseValue(field.field_type, unwrapped);
+ @field(parsed, field.name) = try self.parseValue(field.type, unwrapped);
}
return parsed;
diff --git a/src/main.zig b/src/main.zig
index a7d4dc6f03..421164de1c 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -517,6 +517,7 @@ const usage_build_generic =
\\ -dead_strip (Darwin) remove functions and data that are unreachable by the entry point or exported symbols
\\ -dead_strip_dylibs (Darwin) remove dylibs that are unreachable by the entry point or exported symbols
\\ --import-memory (WebAssembly) import memory from the environment
+ \\ --import-symbols (WebAssembly) import missing symbols from the host environment
\\ --import-table (WebAssembly) import function table from the host environment
\\ --export-table (WebAssembly) export function table to the host environment
\\ --initial-memory=[bytes] (WebAssembly) initial size of the linear memory
@@ -535,6 +536,7 @@ const usage_build_generic =
\\ --test-runner [path] Specify a custom test runner
\\
\\Debug Options (Zig Compiler Development):
+ \\ -fopt-bisect-limit [limit] Only run [limit] first LLVM optimization passes
\\ -ftime-report Print timing diagnostics
\\ -fstack-report Print stack size diagnostics
\\ --verbose-link Display linker invocations
@@ -718,6 +720,7 @@ fn buildOutputType(
var linker_allow_shlib_undefined: ?bool = null;
var linker_bind_global_refs_locally: ?bool = null;
var linker_import_memory: ?bool = null;
+ var linker_import_symbols: bool = false;
var linker_import_table: bool = false;
var linker_export_table: bool = false;
var linker_initial_memory: ?u64 = null;
@@ -727,6 +730,7 @@ fn buildOutputType(
var linker_print_gc_sections: bool = false;
var linker_print_icf_sections: bool = false;
var linker_print_map: bool = false;
+ var linker_opt_bisect_limit: i32 = -1;
var linker_z_nocopyreloc = false;
var linker_z_nodelete = false;
var linker_z_notext = false;
@@ -1283,6 +1287,8 @@ fn buildOutputType(
no_builtin = false;
} else if (mem.eql(u8, arg, "-fno-builtin")) {
no_builtin = true;
+ } else if (mem.startsWith(u8, arg, "-fopt-bisect-limit=")) {
+ linker_opt_bisect_limit = std.math.lossyCast(i32, parseIntSuffix(arg, "-fopt-bisect-limit=".len));
} else if (mem.eql(u8, arg, "--eh-frame-hdr")) {
link_eh_frame_hdr = true;
} else if (mem.eql(u8, arg, "--emit-relocs")) {
@@ -1316,6 +1322,8 @@ fn buildOutputType(
}
} else if (mem.eql(u8, arg, "--import-memory")) {
linker_import_memory = true;
+ } else if (mem.eql(u8, arg, "--import-symbols")) {
+ linker_import_symbols = true;
} else if (mem.eql(u8, arg, "--import-table")) {
linker_import_table = true;
} else if (mem.eql(u8, arg, "--export-table")) {
@@ -1697,6 +1705,18 @@ fn buildOutputType(
};
}
},
+ .install_name => {
+ install_name = it.only_arg;
+ },
+ .undefined => {
+ if (mem.eql(u8, "dynamic_lookup", it.only_arg)) {
+ linker_allow_shlib_undefined = true;
+ } else if (mem.eql(u8, "error", it.only_arg)) {
+ linker_allow_shlib_undefined = false;
+ } else {
+ fatal("unsupported -undefined option '{s}'", .{it.only_arg});
+ }
+ },
}
}
// Parse linker args.
@@ -1827,6 +1847,8 @@ fn buildOutputType(
linker_bind_global_refs_locally = true;
} else if (mem.eql(u8, arg, "--import-memory")) {
linker_import_memory = true;
+ } else if (mem.eql(u8, arg, "--import-symbols")) {
+ linker_import_symbols = true;
} else if (mem.eql(u8, arg, "--import-table")) {
linker_import_table = true;
} else if (mem.eql(u8, arg, "--export-table")) {
@@ -2061,6 +2083,8 @@ fn buildOutputType(
}
if (mem.eql(u8, "dynamic_lookup", linker_args.items[i])) {
linker_allow_shlib_undefined = true;
+ } else if (mem.eql(u8, "error", linker_args.items[i])) {
+ linker_allow_shlib_undefined = false;
} else {
fatal("unsupported -undefined option '{s}'", .{linker_args.items[i]});
}
@@ -2967,6 +2991,7 @@ fn buildOutputType(
.linker_allow_shlib_undefined = linker_allow_shlib_undefined,
.linker_bind_global_refs_locally = linker_bind_global_refs_locally,
.linker_import_memory = linker_import_memory,
+ .linker_import_symbols = linker_import_symbols,
.linker_import_table = linker_import_table,
.linker_export_table = linker_export_table,
.linker_initial_memory = linker_initial_memory,
@@ -2975,6 +3000,7 @@ fn buildOutputType(
.linker_print_gc_sections = linker_print_gc_sections,
.linker_print_icf_sections = linker_print_icf_sections,
.linker_print_map = linker_print_map,
+ .linker_opt_bisect_limit = linker_opt_bisect_limit,
.linker_global_base = linker_global_base,
.linker_export_symbol_names = linker_export_symbol_names.items,
.linker_z_nocopyreloc = linker_z_nocopyreloc,
@@ -3431,7 +3457,7 @@ fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void
if (errors.list.len != 0) {
const ttyconf: std.debug.TTY.Config = switch (comp.color) {
- .auto => std.debug.detectTTYConfig(),
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
.on => .escape_codes,
.off => .no_color,
};
@@ -4252,7 +4278,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (color) {
- .auto => std.debug.detectTTYConfig(),
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
.on => .escape_codes,
.off => .no_color,
};
@@ -4465,7 +4491,7 @@ fn fmtPathFile(
try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (fmt.color) {
- .auto => std.debug.detectTTYConfig(),
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
.on => .escape_codes,
.off => .no_color,
};
@@ -4586,7 +4612,7 @@ fn printErrsMsgToStdErr(
};
const ttyconf: std.debug.TTY.Config = switch (color) {
- .auto => std.debug.detectTTYConfig(),
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
.on => .escape_codes,
.off => .no_color,
};
@@ -4791,6 +4817,8 @@ pub const ClangArgIterator = struct {
weak_framework,
headerpad_max_install_names,
compress_debug_sections,
+ install_name,
+ undefined,
};
const Args = struct {
@@ -5176,7 +5204,7 @@ pub fn cmdAstCheck(
var errors = std.ArrayList(Compilation.AllErrors.Message).init(arena);
try Compilation.AllErrors.addZir(arena, &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (color) {
- .auto => std.debug.detectTTYConfig(),
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
.on => .escape_codes,
.off => .no_color,
};
@@ -5301,7 +5329,7 @@ pub fn cmdChangelist(
if (file.zir.hasCompileErrors()) {
var errors = std.ArrayList(Compilation.AllErrors.Message).init(arena);
try Compilation.AllErrors.addZir(arena, &errors, &file);
- const ttyconf = std.debug.detectTTYConfig();
+ const ttyconf = std.debug.detectTTYConfig(std.io.getStdErr());
for (errors.items) |full_err_msg| {
full_err_msg.renderToStdErr(ttyconf);
}
@@ -5340,7 +5368,7 @@ pub fn cmdChangelist(
if (file.zir.hasCompileErrors()) {
var errors = std.ArrayList(Compilation.AllErrors.Message).init(arena);
try Compilation.AllErrors.addZir(arena, &errors, &file);
- const ttyconf = std.debug.detectTTYConfig();
+ const ttyconf = std.debug.detectTTYConfig(std.io.getStdErr());
for (errors.items) |full_err_msg| {
full_err_msg.renderToStdErr(ttyconf);
}
diff --git a/src/mingw.zig b/src/mingw.zig
index a4f2f0cf91..1fee8e90a4 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -543,6 +543,7 @@ const msvcrt_common_src = [_][]const u8{
"stdio" ++ path.sep_str ++ "acrt_iob_func.c",
"stdio" ++ path.sep_str ++ "snprintf_alias.c",
"stdio" ++ path.sep_str ++ "vsnprintf_alias.c",
+ "stdio" ++ path.sep_str ++ "_vscprintf.c",
"misc" ++ path.sep_str ++ "_configthreadlocale.c",
"misc" ++ path.sep_str ++ "_get_current_locale.c",
"misc" ++ path.sep_str ++ "invalid_parameter_handler.c",
diff --git a/src/print_air.zig b/src/print_air.zig
index 671f781e5e..133e987285 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -191,6 +191,7 @@ const Writer = struct {
.neg_optimized,
.cmp_lt_errors_len,
.set_err_return_trace,
+ .c_va_end,
=> try w.writeUnOp(s, inst),
.breakpoint,
@@ -203,10 +204,12 @@ const Writer = struct {
.const_ty,
.alloc,
.ret_ptr,
- .arg,
.err_return_trace,
+ .c_va_start,
=> try w.writeTy(s, inst),
+ .arg => try w.writeArg(s, inst),
+
.not,
.bitcast,
.load,
@@ -246,6 +249,8 @@ const Writer = struct {
.bit_reverse,
.error_set_has_value,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> try w.writeTyOp(s, inst),
.block,
@@ -306,6 +311,7 @@ const Writer = struct {
.shuffle => try w.writeShuffle(s, inst),
.reduce, .reduce_optimized => try w.writeReduce(s, inst),
.cmp_vector, .cmp_vector_optimized => try w.writeCmpVector(s, inst),
+ .vector_store_elem => try w.writeVectorStoreElem(s, inst),
.dbg_block_begin, .dbg_block_end => {},
}
@@ -346,6 +352,12 @@ const Writer = struct {
try w.writeType(s, ty);
}
+ fn writeArg(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const arg = w.air.instructions.items(.data)[inst].arg;
+ try w.writeType(s, w.air.getRefType(arg.ty));
+ try s.print(", {d}", .{arg.src_index});
+ }
+
fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_op = w.air.instructions.items(.data)[inst].ty_op;
try w.writeType(s, w.air.getRefType(ty_op.ty));
@@ -478,6 +490,17 @@ const Writer = struct {
try w.writeOperand(s, inst, 1, extra.rhs);
}
+ fn writeVectorStoreElem(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const data = w.air.instructions.items(.data)[inst].vector_store_elem;
+ const extra = w.air.extraData(Air.VectorCmp, data.payload).data;
+
+ try w.writeOperand(s, inst, 0, data.vector_ptr);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 1, extra.lhs);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 2, extra.rhs);
+ }
+
fn writeFence(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const atomic_order = w.air.instructions.items(.data)[inst].fence;
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 6dbaf51bc3..49c97a5bc7 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -185,7 +185,6 @@ const Writer = struct {
.size_of,
.bit_size_of,
.typeof_log2_int_type,
- .log2_int_type,
.ptr_to_int,
.compile_error,
.set_eval_branch_quota,
@@ -230,7 +229,6 @@ const Writer = struct {
.validate_struct_init_ty,
.make_ptr_const,
.validate_deref,
- .overflow_arithmetic_ptr,
.check_comptime_control_flow,
=> try self.writeUnNode(stream, inst),
@@ -465,6 +463,7 @@ const Writer = struct {
.frame,
.frame_address,
.breakpoint,
+ .c_va_start,
=> try self.writeExtNode(stream, extended),
.builtin_src => {
@@ -504,6 +503,8 @@ const Writer = struct {
.error_to_int,
.int_to_error,
.reify,
+ .c_va_copy,
+ .c_va_end,
=> {
const inst_data = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(inst_data.node);
@@ -518,6 +519,7 @@ const Writer = struct {
.wasm_memory_grow,
.prefetch,
.addrspace_cast,
+ .c_va_arg,
=> {
const inst_data = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(inst_data.node);
@@ -1149,14 +1151,12 @@ const Writer = struct {
}
fn writeOverflowArithmetic(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
- const extra = self.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data;
+ const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
try self.writeInstRef(stream, extra.lhs);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.rhs);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.ptr);
try stream.writeAll(")) ");
try self.writeSrc(stream, src);
}
@@ -1312,7 +1312,7 @@ const Writer = struct {
type_len: u32 = 0,
align_len: u32 = 0,
init_len: u32 = 0,
- field_type: Zir.Inst.Ref = .none,
+ type: Zir.Inst.Ref = .none,
name: u32,
is_comptime: bool,
};
@@ -1353,7 +1353,7 @@ const Writer = struct {
if (has_type_body) {
fields[field_i].type_len = self.code.extra[extra_index];
} else {
- fields[field_i].field_type = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ fields[field_i].type = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
}
extra_index += 1;
@@ -1384,8 +1384,8 @@ const Writer = struct {
} else {
try stream.print("@\"{d}\": ", .{i});
}
- if (field.field_type != .none) {
- try self.writeInstRef(stream, field.field_type);
+ if (field.type != .none) {
+ try self.writeInstRef(stream, field.type);
}
if (field.type_len > 0) {
diff --git a/src/target.zig b/src/target.zig
index 836791a1d1..001adad7c2 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -437,9 +437,6 @@ pub fn hasRedZone(target: std.Target) bool {
return switch (target.cpu.arch) {
.x86_64,
.x86,
- .powerpc,
- .powerpc64,
- .powerpc64le,
.aarch64,
.aarch64_be,
.aarch64_32,
diff --git a/src/test.zig b/src/test.zig
index 56abe248b3..08a62b891f 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -1723,7 +1723,8 @@ pub const TestContext = struct {
(case_msg.src.column == std.math.maxInt(u32) or
actual_msg.column == case_msg.src.column) and
std.mem.eql(u8, expected_msg, actual_msg.msg) and
- case_msg.src.kind == .note)
+ case_msg.src.kind == .note and
+ actual_msg.count == case_msg.src.count)
{
handled_errors[i] = true;
break;
@@ -1733,7 +1734,8 @@ pub const TestContext = struct {
if (ex_tag != .plain) continue;
if (std.mem.eql(u8, case_msg.plain.msg, plain.msg) and
- case_msg.plain.kind == .note)
+ case_msg.plain.kind == .note and
+ case_msg.plain.count == plain.count)
{
handled_errors[i] = true;
break;
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 1ed2eb568c..78175a611b 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -392,7 +392,7 @@ pub const Node = extern union {
}
pub fn Data(comptime t: Tag) type {
- return std.meta.fieldInfo(t.Type(), .data).field_type;
+ return std.meta.fieldInfo(t.Type(), .data).type;
}
};
@@ -845,7 +845,7 @@ const Context = struct {
try c.extra_data.ensureUnusedCapacity(c.gpa, fields.len);
const result = @intCast(u32, c.extra_data.items.len);
inline for (fields) |field| {
- comptime std.debug.assert(field.field_type == NodeIndex);
+ comptime std.debug.assert(field.type == NodeIndex);
c.extra_data.appendAssumeCapacity(@field(extra, field.name));
}
return result;
diff --git a/src/type.zig b/src/type.zig
index 5e6f032798..632be95438 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -748,6 +748,8 @@ pub const Type = extern union {
return false;
if (info_a.host_size != info_b.host_size)
return false;
+ if (info_a.vector_index != info_b.vector_index)
+ return false;
if (info_a.@"allowzero" != info_b.@"allowzero")
return false;
if (info_a.mutable != info_b.mutable)
@@ -1126,6 +1128,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, info.@"addrspace");
std.hash.autoHash(hasher, info.bit_offset);
std.hash.autoHash(hasher, info.host_size);
+ std.hash.autoHash(hasher, info.vector_index);
std.hash.autoHash(hasher, info.@"allowzero");
std.hash.autoHash(hasher, info.mutable);
std.hash.autoHash(hasher, info.@"volatile");
@@ -1467,6 +1470,7 @@ pub const Type = extern union {
.@"addrspace" = payload.@"addrspace",
.bit_offset = payload.bit_offset,
.host_size = payload.host_size,
+ .vector_index = payload.vector_index,
.@"allowzero" = payload.@"allowzero",
.mutable = payload.mutable,
.@"volatile" = payload.@"volatile",
@@ -1855,12 +1859,17 @@ pub const Type = extern union {
.C => try writer.writeAll("[*c]"),
.Slice => try writer.writeAll("[]"),
}
- if (payload.@"align" != 0 or payload.host_size != 0) {
+ if (payload.@"align" != 0 or payload.host_size != 0 or payload.vector_index != .none) {
try writer.print("align({d}", .{payload.@"align"});
if (payload.bit_offset != 0 or payload.host_size != 0) {
try writer.print(":{d}:{d}", .{ payload.bit_offset, payload.host_size });
}
+ if (payload.vector_index == .runtime) {
+ try writer.writeAll(":?");
+ } else if (payload.vector_index != .none) {
+ try writer.print(":{d}", .{@enumToInt(payload.vector_index)});
+ }
try writer.writeAll(") ");
}
if (payload.@"addrspace" != .generic) {
@@ -2185,12 +2194,22 @@ pub const Type = extern union {
.C => try writer.writeAll("[*c]"),
.Slice => try writer.writeAll("[]"),
}
- if (info.@"align" != 0 or info.host_size != 0) {
- try writer.print("align({d}", .{info.@"align"});
+ if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) {
+ if (info.@"align" != 0) {
+ try writer.print("align({d}", .{info.@"align"});
+ } else {
+ const alignment = info.pointee_type.abiAlignment(mod.getTarget());
+ try writer.print("align({d}", .{alignment});
+ }
if (info.bit_offset != 0 or info.host_size != 0) {
try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size });
}
+ if (info.vector_index == .runtime) {
+ try writer.writeAll(":?");
+ } else if (info.vector_index != .none) {
+ try writer.print(":{d}", .{@enumToInt(info.vector_index)});
+ }
try writer.writeAll(") ");
}
if (info.@"addrspace" != .generic) {
@@ -3109,6 +3128,7 @@ pub const Type = extern union {
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
+ if (!(field_ty.hasRuntimeBits())) continue;
switch (try field_ty.abiAlignmentAdvanced(target, strat)) {
.scalar => |field_align| big_align = @max(big_align, field_align),
@@ -3475,7 +3495,10 @@ pub const Type = extern union {
return AbiSizeAdvanced{ .scalar = 0 };
}
- if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 };
+ if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) },
+ else => |e| return e,
+ })) return AbiSizeAdvanced{ .scalar = 1 };
if (ty.optionalReprIsPayload()) {
return abiSizeAdvanced(child_type, target, strat);
@@ -3504,7 +3527,10 @@ pub const Type = extern union {
// in abiAlignmentAdvanced.
const data = ty.castTag(.error_union).?.data;
const code_size = abiSize(Type.anyerror, target);
- if (!data.payload.hasRuntimeBits()) {
+ if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) },
+ else => |e| return e,
+ })) {
// Same as anyerror.
return AbiSizeAdvanced{ .scalar = code_size };
}
@@ -3865,6 +3891,7 @@ pub const Type = extern union {
payload.@"addrspace" != .generic or
payload.bit_offset != 0 or
payload.host_size != 0 or
+ payload.vector_index != .none or
payload.@"allowzero" or
payload.@"volatile")
{
@@ -3877,6 +3904,7 @@ pub const Type = extern union {
.@"addrspace" = payload.@"addrspace",
.bit_offset = payload.bit_offset,
.host_size = payload.host_size,
+ .vector_index = payload.vector_index,
.@"allowzero" = payload.@"allowzero",
.mutable = payload.mutable,
.@"volatile" = payload.@"volatile",
@@ -4586,7 +4614,7 @@ pub const Type = extern union {
}
/// Asserts the type is an integer, enum, error set, or vector of one of them.
- pub fn intInfo(self: Type, target: Target) struct { signedness: std.builtin.Signedness, bits: u16 } {
+ pub fn intInfo(self: Type, target: Target) std.builtin.Type.Int {
var ty = self;
while (true) switch (ty.tag()) {
.int_unsigned => return .{
@@ -6200,7 +6228,7 @@ pub const Type = extern union {
}
pub fn Data(comptime t: Tag) type {
- return std.meta.fieldInfo(t.Type(), .data).field_type;
+ return std.meta.fieldInfo(t.Type(), .data).type;
}
};
@@ -6365,11 +6393,18 @@ pub const Type = extern union {
/// When host_size=pointee_abi_size and bit_offset=0, this must be
/// represented with host_size=0 instead.
host_size: u16 = 0,
+ vector_index: VectorIndex = .none,
@"allowzero": bool = false,
mutable: bool = true, // TODO rename this to const, not mutable
@"volatile": bool = false,
size: std.builtin.Type.Pointer.Size = .One,
+ pub const VectorIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ runtime = std.math.maxInt(u32) - 1,
+ _,
+ };
+
pub fn alignment(data: Data, target: Target) u32 {
if (data.@"align" != 0) return data.@"align";
return abiAlignment(data.pointee_type, target);
@@ -6524,7 +6559,8 @@ pub const Type = extern union {
}
if (d.@"align" == 0 and d.@"addrspace" == .generic and
- d.bit_offset == 0 and d.host_size == 0 and !d.@"allowzero" and !d.@"volatile")
+ d.bit_offset == 0 and d.host_size == 0 and d.vector_index == .none and
+ !d.@"allowzero" and !d.@"volatile")
{
if (d.sentinel) |sent| {
if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
diff --git a/src/value.zig b/src/value.zig
index 3d5636ee34..eeb1228ebc 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -338,7 +338,7 @@ pub const Value = extern union {
}
pub fn Data(comptime t: Tag) type {
- return std.meta.fieldInfo(t.Type(), .data).field_type;
+ return std.meta.fieldInfo(t.Type(), .data).type;
}
};
@@ -1072,11 +1072,13 @@ pub const Value = extern union {
.enum_simple => Module.EnumFull.ValueMap{},
else => unreachable,
};
- break :field_index if (values.entries.len == 0)
+ if (values.entries.len == 0) {
// auto-numbered enum
- @intCast(u32, val.toUnsignedInt(mod.getTarget()))
- else
- @intCast(u32, values.getIndexContext(val, .{ .ty = ty, .mod = mod }).?);
+ break :field_index @intCast(u32, val.toUnsignedInt(mod.getTarget()));
+ }
+ var buffer: Type.Payload.Bits = undefined;
+ const int_tag_ty = ty.intTagType(&buffer);
+ break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?);
},
};
@@ -1376,6 +1378,7 @@ pub const Value = extern union {
var enum_buffer: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_buffer);
+ if (abi_size == 0) return;
if (abi_size <= @sizeOf(u64)) {
const int: u64 = switch (int_val.tag()) {
.zero => 0,
@@ -1569,6 +1572,7 @@ pub const Value = extern union {
const abi_size = @intCast(usize, ty.abiSize(target));
const bits = int_info.bits;
+ if (bits == 0) return Value.zero;
if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
.signed => return Value.Tag.int_i64.create(arena, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
.unsigned => return Value.Tag.int_u64.create(arena, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
@@ -1756,17 +1760,8 @@ pub const Value = extern union {
const info = ty.intInfo(target);
var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer, target);
-
- var limbs_buffer: [4]std.math.big.Limb = undefined;
- var result_bigint = BigIntMutable{
- .limbs = &limbs_buffer,
- .positive = undefined,
- .len = undefined,
- };
- result_bigint.popCount(operand_bigint, info.bits);
-
- return result_bigint.toConst().to(u64) catch unreachable;
+ const int = val.toBigInt(&buffer, target);
+ return @intCast(u64, int.popCount(info.bits));
},
}
}
@@ -2051,7 +2046,11 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen()) : (i += 1) {
- if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod)) {
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) {
return false;
}
}
@@ -2800,27 +2799,6 @@ pub const Value = extern union {
};
}
- /// Index into a vector-like `Value`. Asserts `index` is a valid index for `val`.
- /// Some scalar values are considered vector-like to avoid needing to allocate
- /// a new `repeated` each time a constant is used.
- pub fn indexVectorlike(val: Value, index: usize) Value {
- return switch (val.tag()) {
- .aggregate => val.castTag(.aggregate).?.data[index],
-
- .repeated => val.castTag(.repeated).?.data,
- // These values will implicitly be treated as `repeated`.
- .zero,
- .one,
- .bool_false,
- .bool_true,
- .int_i64,
- .int_u64,
- => val,
-
- else => unreachable,
- };
- }
-
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
@@ -2896,18 +2874,21 @@ pub const Value = extern union {
// to have only one possible value itself.
.the_only_possible_value => return val,
- // pointer to integer casted to pointer of array
- .int_u64, .int_i64 => {
- assert(index == 0);
- return val;
- },
-
.opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
.eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
.opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
.eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
+ // These values will implicitly be treated as `repeated`.
+ .zero,
+ .one,
+ .bool_false,
+ .bool_true,
+ .int_i64,
+ .int_u64,
+ => return val,
+
else => unreachable,
}
}
@@ -2924,8 +2905,16 @@ pub const Value = extern union {
.field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod),
.eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod),
.opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod),
- .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isVariable(mod),
- .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isVariable(mod),
+ .decl_ref => {
+ const decl = mod.declPtr(val.castTag(.decl_ref).?.data);
+ assert(decl.has_tv);
+ return decl.val.isVariable(mod);
+ },
+ .decl_ref_mut => {
+ const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index);
+ assert(decl.has_tv);
+ return decl.val.isVariable(mod);
+ },
.variable => true,
else => false,
@@ -3179,18 +3168,21 @@ pub const Value = extern union {
};
}
- pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value {
- return intToFloatAdvanced(val, arena, int_ty, float_ty, target, null) catch |err| switch (err) {
+ pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value {
+ return intToFloatAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => unreachable,
};
}
- pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value {
+ pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
+ const target = mod.getTarget();
if (int_ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, int_ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), target, opt_sema);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3269,8 +3261,7 @@ pub const Value = extern union {
}
pub const OverflowArithmeticResult = struct {
- /// TODO: Rename to `overflow_bit` and make of type `u1`.
- overflowed: Value,
+ overflow_bit: Value,
wrapped_result: Value,
};
@@ -3296,12 +3287,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intAddSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3340,12 +3336,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intSubSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3383,18 +3384,23 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !OverflowArithmeticResult {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try intMulWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
- overflowed_data[i] = of_math_result.overflowed;
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+ overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
return OverflowArithmeticResult{
- .overflowed = try Value.Tag.aggregate.create(arena, overflowed_data),
+ .overflow_bit = try Value.Tag.aggregate.create(arena, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
};
}
@@ -3431,7 +3437,7 @@ pub const Value = extern union {
}
return OverflowArithmeticResult{
- .overflowed = makeBool(overflowed),
+ .overflow_bit = boolToInt(overflowed),
.wrapped_result = try fromBigInt(arena, result_bigint.toConst()),
};
}
@@ -3442,16 +3448,20 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try numberMulWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return numberMulWrapScalar(lhs, rhs, ty, arena, target);
+ return numberMulWrapScalar(lhs, rhs, ty, arena, mod);
}
/// Supports both floats and ints; handles undefined.
@@ -3460,19 +3470,19 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
- return intMul(lhs, rhs, ty, arena, target);
+ return intMul(lhs, rhs, ty, arena, mod);
}
if (ty.isAnyFloat()) {
- return floatMul(lhs, rhs, ty, arena, target);
+ return floatMul(lhs, rhs, ty, arena, mod);
}
- const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, target);
+ const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, mod);
return overflow_result.wrapped_result;
}
@@ -3482,12 +3492,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intMulSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3554,11 +3569,14 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseNotScalar(val.indexVectorlike(i), ty.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3590,11 +3608,16 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseAndScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3622,37 +3645,46 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseNandScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return bitwiseNandScalar(lhs, rhs, ty, arena, target);
+ return bitwiseNandScalar(lhs, rhs, ty, arena, mod);
}
/// operands must be integers; handles undefined.
- pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
- const anded = try bitwiseAnd(lhs, rhs, ty, arena, target);
+ const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
const all_ones = if (ty.isSignedInt())
try Value.Tag.int_i64.create(arena, -1)
else
- try ty.maxInt(arena, target);
+ try ty.maxInt(arena, mod.getTarget());
- return bitwiseXor(anded, all_ones, ty, arena, target);
+ return bitwiseXor(anded, all_ones, ty, arena, mod);
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseOrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3679,11 +3711,16 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseXorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3710,11 +3747,16 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3746,11 +3788,16 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3782,11 +3829,16 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3853,11 +3905,16 @@ pub const Value = extern union {
};
}
- pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3895,11 +3952,16 @@ pub const Value = extern union {
}
}
- pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3937,11 +3999,16 @@ pub const Value = extern union {
}
}
- pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3969,11 +4036,14 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value {
+ pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, bits, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3987,12 +4057,17 @@ pub const Value = extern union {
allocator: Allocator,
signedness: std.builtin.Signedness,
bits: Value,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, @intCast(u16, bits.indexVectorlike(i).toUnsignedInt(target)), target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ var bits_buf: Value.ElemValueBuffer = undefined;
+ const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf);
+ scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -4015,11 +4090,16 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shlScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -4050,18 +4130,23 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
allocator: Allocator,
- target: Target,
+ mod: *Module,
) !OverflowArithmeticResult {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try shlWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), allocator, target);
- overflowed_data[i] = of_math_result.overflowed;
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target);
+ overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
return OverflowArithmeticResult{
- .overflowed = try Value.Tag.aggregate.create(allocator, overflowed_data),
+ .overflow_bit = try Value.Tag.aggregate.create(allocator, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(allocator, result_data),
};
}
@@ -4094,7 +4179,7 @@ pub const Value = extern union {
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
}
return OverflowArithmeticResult{
- .overflowed = makeBool(overflowed),
+ .overflow_bit = boolToInt(overflowed),
.wrapped_result = try fromBigInt(allocator, result_bigint.toConst()),
};
}
@@ -4104,12 +4189,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shlSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4148,16 +4238,20 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shlTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return shlTruncScalar(lhs, rhs, ty, arena, target);
+ return shlTruncScalar(lhs, rhs, ty, arena, mod);
}
pub fn shlTruncScalar(
@@ -4165,19 +4259,24 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
- const shifted = try lhs.shl(rhs, ty, arena, target);
- const int_info = ty.intInfo(target);
- const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, target);
+ const shifted = try lhs.shl(rhs, ty, arena, mod);
+ const int_info = ty.intInfo(mod.getTarget());
+ const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod);
return truncated;
}
- pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -4219,12 +4318,15 @@ pub const Value = extern union {
val: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatNegScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4252,12 +4354,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4306,12 +4413,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4360,12 +4472,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatDivTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4414,12 +4531,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4463,11 +4585,14 @@ pub const Value = extern union {
}
}
- pub fn sqrt(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sqrtScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4500,11 +4625,14 @@ pub const Value = extern union {
}
}
- pub fn sin(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sinScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4537,11 +4665,14 @@ pub const Value = extern union {
}
}
- pub fn cos(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try cosScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4574,11 +4705,14 @@ pub const Value = extern union {
}
}
- pub fn tan(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try tanScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4611,11 +4745,14 @@ pub const Value = extern union {
}
}
- pub fn exp(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try expScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4648,11 +4785,14 @@ pub const Value = extern union {
}
}
- pub fn exp2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try exp2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4685,11 +4825,14 @@ pub const Value = extern union {
}
}
- pub fn log(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try logScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4722,11 +4865,14 @@ pub const Value = extern union {
}
}
- pub fn log2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try log2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4759,11 +4905,14 @@ pub const Value = extern union {
}
}
- pub fn log10(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try log10Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4796,11 +4945,14 @@ pub const Value = extern union {
}
}
- pub fn fabs(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try fabsScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4833,11 +4985,14 @@ pub const Value = extern union {
}
}
- pub fn floor(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floorScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4870,11 +5025,14 @@ pub const Value = extern union {
}
}
- pub fn ceil(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try ceilScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4907,11 +5065,14 @@ pub const Value = extern union {
}
}
- pub fn round(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try roundScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4944,11 +5105,14 @@ pub const Value = extern union {
}
}
- pub fn trunc(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try truncScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4987,16 +5151,23 @@ pub const Value = extern union {
mulend2: Value,
addend: Value,
arena: Allocator,
- target: Target,
- ) Allocator.Error!Value {
+ mod: *Module,
+ ) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
+ var mulend1_buf: Value.ElemValueBuffer = undefined;
+ const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
+ var mulend2_buf: Value.ElemValueBuffer = undefined;
+ const mulend2_elem = mulend2.elemValueBuffer(mod, i, &mulend2_buf);
+ var addend_buf: Value.ElemValueBuffer = undefined;
+ const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf);
scalar.* = try mulAddScalar(
float_type.scalarType(),
- mulend1.indexVectorlike(i),
- mulend2.indexVectorlike(i),
- addend.indexVectorlike(i),
+ mulend1_elem,
+ mulend2_elem,
+ addend_elem,
arena,
target,
);
@@ -5322,6 +5493,10 @@ pub const Value = extern union {
return if (x) Value.true else Value.false;
}
+ pub fn boolToInt(x: bool) Value {
+ return if (x) Value.one else Value.zero;
+ }
+
pub const RuntimeIndex = enum(u32) {
zero = 0,
comptime_field_ptr = std.math.maxInt(u32),
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index c38e311f67..4f73bd2c3c 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -31,6 +31,7 @@
#include <llvm/IR/Instructions.h>
#include <llvm/IR/LegacyPassManager.h>
#include <llvm/IR/Module.h>
+#include <llvm/IR/OptBisect.h>
#include <llvm/IR/PassManager.h>
#include <llvm/IR/Verifier.h>
#include <llvm/InitializePasses.h>
@@ -412,6 +413,18 @@ ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) {
return wrap(Type::getTokenTy(*unwrap(context_ref)));
}
+
+ZIG_EXTERN_C void ZigLLVMSetOptBisectLimit(LLVMContextRef context_ref, int limit) {
+ // In LLVM15 we just have an OptBisect singleton we can edit.
+ OptBisect& bisect = getOptBisector();
+ bisect.setLimit(limit);
+
+ // In LLVM16 OptBisect will be wrapped in OptPassGate, and will need to be set per context.
+ // static OptBisect _opt_bisector;
+ // _opt_bisector.setLimit(limit);
+ // unwrap(context_ref)->setOptPassGate(_opt_bisector);
+}
+
LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, LLVMTypeRef FunctionTy, unsigned AddressSpace) {
Function* func = Function::Create(unwrap<FunctionType>(FunctionTy), GlobalValue::ExternalLinkage, AddressSpace, Name, unwrap(M));
return wrap(func);
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 7f9bd0a161..70c53f61a4 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -67,6 +67,8 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co
ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref);
+ZIG_EXTERN_C void ZigLLVMSetOptBisectLimit(LLVMContextRef context_ref, int limit);
+
ZIG_EXTERN_C LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name,
LLVMTypeRef FunctionTy, unsigned AddressSpace);
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index fd166c5ae0..88186ff514 100644
--- a/stage1/zig1.wasm
+++ b/stage1/zig1.wasm
Binary files differ
diff --git a/test/behavior.zig b/test/behavior.zig
index ebd1e1afb7..72f6bf8e6a 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -91,15 +91,24 @@ test {
_ = @import("behavior/bugs/11213.zig");
_ = @import("behavior/bugs/11787.zig");
_ = @import("behavior/bugs/11816.zig");
+ _ = @import("behavior/bugs/11995.zig");
+ _ = @import("behavior/bugs/12000.zig");
_ = @import("behavior/bugs/12003.zig");
_ = @import("behavior/bugs/12025.zig");
_ = @import("behavior/bugs/12033.zig");
_ = @import("behavior/bugs/12043.zig");
+ _ = @import("behavior/bugs/12051.zig");
+ _ = @import("behavior/bugs/12092.zig");
+ _ = @import("behavior/bugs/12119.zig");
+ _ = @import("behavior/bugs/12142.zig");
+ _ = @import("behavior/bugs/12169.zig");
_ = @import("behavior/bugs/12430.zig");
+ _ = @import("behavior/bugs/12450.zig");
_ = @import("behavior/bugs/12486.zig");
_ = @import("behavior/bugs/12488.zig");
_ = @import("behavior/bugs/12498.zig");
_ = @import("behavior/bugs/12551.zig");
+ _ = @import("behavior/bugs/12571.zig");
_ = @import("behavior/bugs/12644.zig");
_ = @import("behavior/bugs/12680.zig");
_ = @import("behavior/bugs/12723.zig");
@@ -121,12 +130,13 @@ test {
_ = @import("behavior/bugs/13068.zig");
_ = @import("behavior/bugs/13069.zig");
_ = @import("behavior/bugs/13112.zig");
+ _ = @import("behavior/bugs/13113.zig");
_ = @import("behavior/bugs/13128.zig");
_ = @import("behavior/bugs/13159.zig");
- _ = @import("behavior/bugs/13164.zig");
_ = @import("behavior/bugs/13171.zig");
_ = @import("behavior/bugs/13209.zig");
_ = @import("behavior/bugs/13285.zig");
+ _ = @import("behavior/bugs/13366.zig");
_ = @import("behavior/bugs/13435.zig");
_ = @import("behavior/bugs/13664.zig");
_ = @import("behavior/bugs/13714.zig");
@@ -140,6 +150,7 @@ test {
_ = @import("behavior/const_slice_child.zig");
_ = @import("behavior/decltest.zig");
_ = @import("behavior/defer.zig");
+ _ = @import("behavior/empty_tuple_fields.zig");
_ = @import("behavior/empty_union.zig");
_ = @import("behavior/enum.zig");
_ = @import("behavior/error.zig");
@@ -158,9 +169,10 @@ test {
_ = @import("behavior/incomplete_struct_param_tld.zig");
_ = @import("behavior/inline_switch.zig");
_ = @import("behavior/int128.zig");
- _ = @import("behavior/int_div.zig");
+ _ = @import("behavior/int_comparison_elision.zig");
_ = @import("behavior/inttoptr.zig");
_ = @import("behavior/ir_block_deps.zig");
+ _ = @import("behavior/lower_strlit_to_vector.zig");
_ = @import("behavior/math.zig");
_ = @import("behavior/maximum_minimum.zig");
_ = @import("behavior/member_func.zig");
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 780ee06875..8272a852df 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -540,6 +540,7 @@ test "align(N) on functions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO this is not supported on MSVC
// function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig
index 4dd62906c4..6109d84fe2 100644
--- a/test/behavior/alignof.zig
+++ b/test/behavior/alignof.zig
@@ -11,7 +11,6 @@ const Foo = struct {
};
test "@alignOf(T) before referencing T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime try expect(@alignOf(Foo) != maxInt(usize));
if (native_arch == .x86_64) {
comptime try expect(@alignOf(Foo) == 4);
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index b886869be1..012e078531 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -45,6 +45,43 @@ fn getArrayLen(a: []const u32) usize {
return a.len;
}
+test "array concat with undefined" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ {
+ var array = "hello".* ++ @as([5]u8, undefined);
+ array[5..10].* = "world".*;
+ try std.testing.expect(std.mem.eql(u8, &array, "helloworld"));
+ }
+ {
+ var array = @as([5]u8, undefined) ++ "world".*;
+ array[0..5].* = "hello".*;
+ try std.testing.expect(std.mem.eql(u8, &array, "helloworld"));
+ }
+}
+
+test "array concat with tuple" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const array: [2]u8 = .{ 1, 2 };
+ {
+ const seq = array ++ .{ 3, 4 };
+ try std.testing.expectEqualSlices(u8, &.{ 1, 2, 3, 4 }, &seq);
+ }
+ {
+ const seq = .{ 3, 4 } ++ array;
+ try std.testing.expectEqualSlices(u8, &.{ 3, 4, 1, 2 }, &seq);
+ }
+}
+
+test "array init with concat" {
+ const a = 'a';
+ var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' };
+ try expect(std.mem.eql(u8, &i, "abcd"));
+}
+
test "array init with mult" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -617,3 +654,11 @@ test "array init of container level array variable" {
S.bar(5, 6);
try expectEqual([2]usize{ 5, 6 }, S.pair);
}
+
+test "runtime initialized sentinel-terminated array literal" {
+ var c: u16 = 300;
+ const f = &[_:0x9999]u16{c};
+ const g = @ptrCast(*[4]u8, f);
+ try std.testing.expect(g[2] == 0x99);
+ try std.testing.expect(g[3] == 0x99);
+}
diff --git a/test/behavior/asm.zig b/test/behavior/asm.zig
index e9a01226b1..f041963494 100644
--- a/test/behavior/asm.zig
+++ b/test/behavior/asm.zig
@@ -23,6 +23,7 @@ test "module level assembly" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (is_x86_64_linux) {
try expect(this_is_my_alias() == 1234);
@@ -35,6 +36,7 @@ test "output constraint modifiers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// This is only testing compilation.
var a: u32 = 3;
@@ -56,6 +58,7 @@ test "alternative constraints" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// Make sure we allow commas as a separator for alternative constraints.
var a: u32 = 3;
@@ -72,6 +75,7 @@ test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
asm volatile (""
:
@@ -121,6 +125,7 @@ test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
asm volatile (""
:
diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig
index ff15aa8dc0..cf7c3b1503 100644
--- a/test/behavior/atomics.zig
+++ b/test/behavior/atomics.zig
@@ -151,6 +151,11 @@ test "cmpxchg on a global variable" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/10627
+ return error.SkipZigTest;
+ }
+
_ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .Acquire, .Monotonic);
try expect(a_global_variable == 42);
}
@@ -213,6 +218,12 @@ test "atomicrmw with floats" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if ((builtin.zig_backend == .stage2_llvm or builtin.zig_backend == .stage2_c) and
+ builtin.cpu.arch == .aarch64)
+ {
+ // https://github.com/ziglang/zig/issues/10627
+ return error.SkipZigTest;
+ }
try testAtomicRmwFloat();
comptime try testAtomicRmwFloat();
}
@@ -239,31 +250,142 @@ test "atomicrmw with ints" {
return error.SkipZigTest;
}
- try testAtomicRmwInt();
- comptime try testAtomicRmwInt();
+ try testAtomicRmwInts();
+ comptime try testAtomicRmwInts();
+}
+
+fn testAtomicRmwInts() !void {
+ // TODO: Use the max atomic bit size for the target, maybe builtin?
+ try testAtomicRmwInt(.unsigned, 8);
+
+ if (builtin.cpu.arch == .x86_64) {
+ try testAtomicRmwInt(.unsigned, 16);
+ try testAtomicRmwInt(.unsigned, 32);
+ try testAtomicRmwInt(.unsigned, 64);
+ }
}
-fn testAtomicRmwInt() !void {
- var x: u8 = 1;
- var res = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst);
+fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usize) !void {
+ const int = std.meta.Int(signedness, N);
+
+ var x: int = 1;
+ var res = @atomicRmw(int, &x, .Xchg, 3, .SeqCst);
try expect(x == 3 and res == 1);
- _ = @atomicRmw(u8, &x, .Add, 3, .SeqCst);
- try expect(x == 6);
- _ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst);
- try expect(x == 5);
- _ = @atomicRmw(u8, &x, .And, 4, .SeqCst);
- try expect(x == 4);
- _ = @atomicRmw(u8, &x, .Nand, 4, .SeqCst);
- try expect(x == 0xfb);
- _ = @atomicRmw(u8, &x, .Or, 6, .SeqCst);
- try expect(x == 0xff);
- _ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst);
- try expect(x == 0xfd);
-
- _ = @atomicRmw(u8, &x, .Max, 1, .SeqCst);
- try expect(x == 0xfd);
- _ = @atomicRmw(u8, &x, .Min, 1, .SeqCst);
- try expect(x == 1);
+
+ res = @atomicRmw(int, &x, .Add, 3, .SeqCst);
+ var y: int = 3;
+ try expect(res == y);
+ y = y + 3;
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .Sub, 1, .SeqCst);
+ try expect(res == y);
+ y = y - 1;
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .And, 4, .SeqCst);
+ try expect(res == y);
+ y = y & 4;
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .Nand, 4, .SeqCst);
+ try expect(res == y);
+ y = ~(y & 4);
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .Or, 6, .SeqCst);
+ try expect(res == y);
+ y = y | 6;
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .Xor, 2, .SeqCst);
+ try expect(res == y);
+ y = y ^ 2;
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .Max, 1, .SeqCst);
+ try expect(res == y);
+ y = @max(y, 1);
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .Min, 1, .SeqCst);
+ try expect(res == y);
+ y = @min(y, 1);
+ try expect(x == y);
+}
+
+test "atomicrmw with 128-bit ints" {
+ if (builtin.cpu.arch != .x86_64) {
+ // TODO: Ideally this could use target.atomicPtrAlignment and check for IntTooBig
+ return error.SkipZigTest;
+ }
+
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ // TODO "ld.lld: undefined symbol: __sync_lock_test_and_set_16" on -mcpu x86_64
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+
+ try testAtomicRmwInt128(.unsigned);
+ comptime try testAtomicRmwInt128(.unsigned);
+}
+
+fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
+ const int = std.meta.Int(signedness, 128);
+
+ const initial: int = 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd;
+ const replacement: int = 0x00000000_00000005_00000000_00000003;
+
+ var x: int align(16) = initial;
+ var res = @atomicRmw(int, &x, .Xchg, replacement, .SeqCst);
+ try expect(x == replacement and res == initial);
+
+ var operator: int = 0x00000001_00000000_20000000_00000000;
+ res = @atomicRmw(int, &x, .Add, operator, .SeqCst);
+ var y: int = replacement;
+ try expect(res == y);
+ y = y + operator;
+ try expect(x == y);
+
+ operator = 0x00000000_10000000_00000000_20000000;
+ res = @atomicRmw(int, &x, .Sub, operator, .SeqCst);
+ try expect(res == y);
+ y = y - operator;
+ try expect(x == y);
+
+ operator = 0x12345678_87654321_12345678_87654321;
+ res = @atomicRmw(int, &x, .And, operator, .SeqCst);
+ try expect(res == y);
+ y = y & operator;
+ try expect(x == y);
+
+ operator = 0x00000000_10000000_00000000_20000000;
+ res = @atomicRmw(int, &x, .Nand, operator, .SeqCst);
+ try expect(res == y);
+ y = ~(y & operator);
+ try expect(x == y);
+
+ operator = 0x12340000_56780000_67890000_98760000;
+ res = @atomicRmw(int, &x, .Or, operator, .SeqCst);
+ try expect(res == y);
+ y = y | operator;
+ try expect(x == y);
+
+ operator = 0x0a0b0c0d_0e0f0102_03040506_0708090a;
+ res = @atomicRmw(int, &x, .Xor, operator, .SeqCst);
+ try expect(res == y);
+ y = y ^ operator;
+ try expect(x == y);
+
+ operator = 0x00000000_10000000_00000000_20000000;
+ res = @atomicRmw(int, &x, .Max, operator, .SeqCst);
+ try expect(res == y);
+ y = @max(y, operator);
+ try expect(x == y);
+
+ res = @atomicRmw(int, &x, .Min, operator, .SeqCst);
+ try expect(res == y);
+ y = @min(y, operator);
+ try expect(x == y);
}
test "atomics with different types" {
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 43574db06f..ebca81be96 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -37,6 +37,24 @@ test "truncate to non-power-of-two integers" {
try testTrunc(i32, i5, std.math.maxInt(i32), -1);
}
+test "truncate to non-power-of-two integers from 128-bit" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+ try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010101, 0x01);
+ try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010110, 0x00);
+ try testTrunc(u128, u2, 0xffffffff_ffffffff_ffffffff_01010101, 0x01);
+ try testTrunc(u128, u2, 0xffffffff_ffffffff_ffffffff_01010102, 0x02);
+ try testTrunc(i128, i5, -4, -4);
+ try testTrunc(i128, i5, 4, 4);
+ try testTrunc(i128, i5, -28, 4);
+ try testTrunc(i128, i5, 28, -4);
+ try testTrunc(i128, i5, std.math.maxInt(i128), -1);
+}
+
fn testTrunc(comptime Big: type, comptime Little: type, big: Big, little: Little) !void {
try expect(@truncate(Little, big) == little);
}
@@ -113,21 +131,18 @@ fn first4KeysOfHomeRow() []const u8 {
}
test "return string from function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(mem.eql(u8, first4KeysOfHomeRow(), "aoeu"));
}
test "hex escape" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello"));
}
test "multiline string" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s1 =
@@ -140,7 +155,6 @@ test "multiline string" {
}
test "multiline string comments at start" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s1 =
@@ -153,7 +167,6 @@ test "multiline string comments at start" {
}
test "multiline string comments at end" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s1 =
@@ -166,7 +179,6 @@ test "multiline string comments at end" {
}
test "multiline string comments in middle" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s1 =
@@ -179,7 +191,6 @@ test "multiline string comments in middle" {
}
test "multiline string comments at multiple places" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s1 =
@@ -198,7 +209,6 @@ test "string concatenation" {
}
test "array mult operator" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
@@ -208,7 +218,6 @@ const OpaqueA = opaque {};
const OpaqueB = opaque {};
test "opaque types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(*OpaqueA != *OpaqueB);
@@ -226,7 +235,6 @@ test "compile time global reinterpret" {
}
test "cast undefined" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array: [100]u8 = undefined;
@@ -238,7 +246,6 @@ fn testCastUndefined(x: []const u8) void {
}
test "implicit cast after unreachable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -297,7 +304,6 @@ fn fB() []const u8 {
}
test "call function pointer in struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(mem.eql(u8, f3(true), "a"));
@@ -459,7 +465,6 @@ fn testArray2DConstDoublePtr(ptr: *const f32) !void {
}
test "double implicit cast in same expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x = @as(i32, @as(u16, nine()));
@@ -492,7 +497,6 @@ fn testStructInFn() !void {
}
test "fn call returning scalar optional in equality expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(getNull() == null);
}
@@ -502,7 +506,6 @@ fn getNull() ?*i32 {
}
test "global variable assignment with optional unwrapping with var initialized to undefined" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -628,7 +631,6 @@ var gdt = [_]GDTEntry{
var global_ptr = &gdt[0];
test "global constant is loaded with a runtime-known index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -888,7 +890,6 @@ test "labeled block with runtime branch forwards its result location type to bre
}
test "try in labeled block doesn't cast to wrong type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1034,7 +1035,6 @@ comptime {
}
test "switch inside @as gets correct type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: u32 = 0;
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 73b20f3568..0e8ff65414 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -239,7 +239,6 @@ test "bitcast packed struct to integer and back" {
}
test "implicit cast to error union by returning" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -334,6 +333,11 @@ test "comptime @bitCast packed struct to int and back" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.endian() == .Big) {
+ // https://github.com/ziglang/zig/issues/13782
+ return error.SkipZigTest;
+ }
+
const S = packed struct {
void: void = {},
uint: u8 = 13,
@@ -358,9 +362,6 @@ test "comptime @bitCast packed struct to int and back" {
const rt_cast = @bitCast(S, i);
const ct_cast = comptime @bitCast(S, @as(Int, 0));
inline for (@typeInfo(S).Struct.fields) |field| {
- if (@typeInfo(field.field_type) == .Vector)
- continue; //TODO: https://github.com/ziglang/zig/issues/13201
-
try expectEqual(@field(rt_cast, field.name), @field(ct_cast, field.name));
}
}
diff --git a/test/behavior/bugs/11995.zig b/test/behavior/bugs/11995.zig
new file mode 100644
index 0000000000..255a366fb2
--- /dev/null
+++ b/test/behavior/bugs/11995.zig
@@ -0,0 +1,34 @@
+const std = @import("std");
+const testing = std.testing;
+const builtin = @import("builtin");
+
+fn wuffs_base__make_io_buffer(arg_data: wuffs_base__slice_u8, arg_meta: *wuffs_base__io_buffer_meta) callconv(.C) void {
+ arg_data.ptr[0] = 'w';
+ arg_meta.closed = false;
+}
+const wuffs_base__io_buffer_meta = extern struct {
+ wi: usize,
+ ri: usize,
+ pos: u64,
+ closed: bool,
+};
+const wuffs_base__slice_u8 = extern struct {
+ ptr: [*c]u8,
+ len: usize,
+};
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ var string: [5]u8 = "hello".*;
+ const arg_data = wuffs_base__slice_u8{ .ptr = @ptrCast([*c]u8, &string), .len = string.len };
+ var arg_meta = wuffs_base__io_buffer_meta{ .wi = 1, .ri = 2, .pos = 3, .closed = true };
+ wuffs_base__make_io_buffer(arg_data, &arg_meta);
+ try std.testing.expectEqualStrings("wello", arg_data.ptr[0..arg_data.len]);
+ try std.testing.expectEqual(@as(usize, 1), arg_meta.wi);
+ try std.testing.expectEqual(@as(usize, 2), arg_meta.ri);
+ try std.testing.expectEqual(@as(u64, 3), arg_meta.pos);
+ try std.testing.expect(!arg_meta.closed);
+}
diff --git a/test/behavior/bugs/13164.zig b/test/behavior/bugs/12000.zig
index 66f4e28fd8..c29fb84270 100644
--- a/test/behavior/bugs/13164.zig
+++ b/test/behavior/bugs/12000.zig
@@ -1,18 +1,16 @@
const std = @import("std");
const builtin = @import("builtin");
-inline fn setLimits(min: ?u32, max: ?u32) !void {
- if (min != null and max != null) {
- try std.testing.expect(min.? <= max.?);
- }
-}
+const T = struct {
+ next: @TypeOf(null, @as(*const T, undefined)),
+};
test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-
- var x: u32 = 42;
- try setLimits(x, null);
+ var t: T = .{ .next = null };
+ try std.testing.expect(t.next == null);
}
diff --git a/test/behavior/bugs/12051.zig b/test/behavior/bugs/12051.zig
new file mode 100644
index 0000000000..efbfc88404
--- /dev/null
+++ b/test/behavior/bugs/12051.zig
@@ -0,0 +1,39 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ const x = X{};
+ try std.testing.expectEqual(@as(u16, 0), x.y.a);
+ try std.testing.expectEqual(false, x.y.b);
+ try std.testing.expectEqual(Z{ .a = 0 }, x.y.c);
+ try std.testing.expectEqual(Z{ .a = 0 }, x.y.d);
+}
+
+const X = struct {
+ y: Y = Y.init(),
+};
+
+const Y = struct {
+ a: u16,
+ b: bool,
+ c: Z,
+ d: Z,
+
+ fn init() Y {
+ return .{
+ .a = 0,
+ .b = false,
+ .c = @bitCast(Z, @as(u32, 0)),
+ .d = @bitCast(Z, @as(u32, 0)),
+ };
+ }
+};
+
+const Z = packed struct {
+ a: u32,
+};
diff --git a/test/behavior/bugs/12092.zig b/test/behavior/bugs/12092.zig
new file mode 100644
index 0000000000..3a7b9766a3
--- /dev/null
+++ b/test/behavior/bugs/12092.zig
@@ -0,0 +1,28 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+const Foo = struct {
+ a: Bar,
+};
+
+const Bar = struct {
+ b: u32,
+};
+
+fn takeFoo(foo: *const Foo) !void {
+ try std.testing.expectEqual(@as(u32, 24), foo.a.b);
+}
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ var baz: u32 = 24;
+ try takeFoo(&.{
+ .a = .{
+ .b = baz,
+ },
+ });
+}
diff --git a/test/behavior/bugs/12119.zig b/test/behavior/bugs/12119.zig
new file mode 100644
index 0000000000..bb12e3565a
--- /dev/null
+++ b/test/behavior/bugs/12119.zig
@@ -0,0 +1,16 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+const u8x32 = @Vector(32, u8);
+const u32x8 = @Vector(8, u32);
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ const zerox32: u8x32 = [_]u8{0} ** 32;
+ const bigsum: u32x8 = @bitCast(u32x8, zerox32);
+ try std.testing.expectEqual(0, @reduce(.Add, bigsum));
+}
diff --git a/test/behavior/bugs/12142.zig b/test/behavior/bugs/12142.zig
new file mode 100644
index 0000000000..db303d617a
--- /dev/null
+++ b/test/behavior/bugs/12142.zig
@@ -0,0 +1,37 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+const Holder = struct {
+ array: []const u8,
+};
+
+const Test = struct {
+ holders: []const Holder,
+};
+
+const Letter = enum(u8) {
+ A = 0x41,
+ B,
+};
+
+fn letter(e: Letter) u8 {
+ return @enumToInt(e);
+}
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ const test_struct = Test{
+ .holders = &.{
+ Holder{
+ .array = &.{
+ letter(.A),
+ },
+ },
+ },
+ };
+ try std.testing.expectEqualStrings("A", test_struct.holders[0].array);
+}
diff --git a/test/behavior/bugs/12169.zig b/test/behavior/bugs/12169.zig
new file mode 100644
index 0000000000..5dd3fdefa9
--- /dev/null
+++ b/test/behavior/bugs/12169.zig
@@ -0,0 +1,20 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.endian() == .Big) {
+ // https://github.com/ziglang/zig/issues/13782
+ return error.SkipZigTest;
+ }
+
+ const a = @Vector(2, bool){ true, true };
+ const b = @Vector(1, bool){true};
+ try std.testing.expect(@reduce(.And, a));
+ try std.testing.expect(@reduce(.And, b));
+}
diff --git a/test/behavior/bugs/12450.zig b/test/behavior/bugs/12450.zig
new file mode 100644
index 0000000000..5161e3ffd3
--- /dev/null
+++ b/test/behavior/bugs/12450.zig
@@ -0,0 +1,21 @@
+const expect = @import("std").testing.expect;
+const builtin = @import("builtin");
+
+const Foo = packed struct {
+ a: i32,
+ b: u8,
+};
+
+var buffer: [256]u8 = undefined;
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ var f1: *align(16) Foo = @alignCast(16, @ptrCast(*align(1) Foo, &buffer[0]));
+ try expect(@typeInfo(@TypeOf(f1)).Pointer.alignment == 16);
+ try expect(@ptrToInt(f1) == @ptrToInt(&f1.a));
+ try expect(@typeInfo(@TypeOf(&f1.a)).Pointer.alignment == 16);
+}
diff --git a/test/behavior/bugs/12571.zig b/test/behavior/bugs/12571.zig
new file mode 100644
index 0000000000..ace1e93b3f
--- /dev/null
+++ b/test/behavior/bugs/12571.zig
@@ -0,0 +1,23 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const expect = std.testing.expect;
+
+const Frame = packed struct {
+ num: u20,
+};
+
+const Entry = packed struct {
+ other: u12,
+ frame: Frame,
+};
+
+test {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const frame = Frame{ .num = 0x7FDE };
+ var entry = Entry{ .other = 0, .frame = .{ .num = 0xFFFFF } };
+ entry.frame = frame;
+ try expect(entry.frame.num == 0x7FDE);
+}
diff --git a/test/behavior/bugs/12786.zig b/test/behavior/bugs/12786.zig
index e8c1a2333f..958b1ebf86 100644
--- a/test/behavior/bugs/12786.zig
+++ b/test/behavior/bugs/12786.zig
@@ -8,7 +8,7 @@ fn NamespacedGlobals(comptime modules: anytype) type {
.fields = &.{
.{
.name = "globals",
- .field_type = modules.mach.globals,
+ .type = modules.mach.globals,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(modules.mach.globals),
diff --git a/test/behavior/bugs/12794.zig b/test/behavior/bugs/12794.zig
index 530f81cff2..2d4f8bafd9 100644
--- a/test/behavior/bugs/12794.zig
+++ b/test/behavior/bugs/12794.zig
@@ -7,7 +7,7 @@ fn NamespacedComponents(comptime modules: anytype) type {
.is_tuple = false,
.fields = &.{.{
.name = "components",
- .field_type = @TypeOf(modules.components),
+ .type = @TypeOf(modules.components),
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(@TypeOf(modules.components)),
diff --git a/test/behavior/bugs/12885.zig b/test/behavior/bugs/12885.zig
index 2a802efd04..517def14a1 100644
--- a/test/behavior/bugs/12885.zig
+++ b/test/behavior/bugs/12885.zig
@@ -15,8 +15,8 @@ test "ErrorSet comptime_field_ptr" {
}
const fn_info = .{
- .args = [_]builtin.Type.Fn.Param{
- .{ .is_generic = false, .is_noalias = false, .arg_type = u8 },
+ .params = [_]builtin.Type.Fn.Param{
+ .{ .is_generic = false, .is_noalias = false, .type = u8 },
},
};
const Bar = @Type(.{
@@ -26,7 +26,7 @@ const Bar = @Type(.{
.is_generic = false,
.is_var_args = false,
.return_type = void,
- .args = &fn_info.args,
+ .params = &fn_info.params,
},
});
test "fn comptime_field_ptr" {
diff --git a/test/behavior/bugs/13113.zig b/test/behavior/bugs/13113.zig
new file mode 100644
index 0000000000..cfbf7b6650
--- /dev/null
+++ b/test/behavior/bugs/13113.zig
@@ -0,0 +1,21 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+const Foo = extern struct {
+ a: u8 align(1),
+ b: u16 align(1),
+};
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ const foo = Foo{
+ .a = 1,
+ .b = 2,
+ };
+ try std.testing.expectEqual(1, foo.a);
+ try std.testing.expectEqual(2, foo.b);
+}
diff --git a/test/behavior/bugs/13366.zig b/test/behavior/bugs/13366.zig
new file mode 100644
index 0000000000..ff90ee00c3
--- /dev/null
+++ b/test/behavior/bugs/13366.zig
@@ -0,0 +1,28 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const expect = std.testing.expect;
+
+const ComptimeReason = union(enum) {
+ c_import: struct {
+ a: u32,
+ },
+};
+
+const Block = struct {
+ reason: ?*const ComptimeReason,
+};
+
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ var a: u32 = 16;
+ var reason = .{ .c_import = .{ .a = a } };
+ var block = Block{
+ .reason = &reason,
+ };
+ try expect(block.reason.?.c_import.a == 16);
+}
diff --git a/test/behavior/bugs/13435.zig b/test/behavior/bugs/13435.zig
index a23735f2f4..86a43afa0c 100644
--- a/test/behavior/bugs/13435.zig
+++ b/test/behavior/bugs/13435.zig
@@ -8,7 +8,7 @@ fn CreateUnion(comptime T: type) type {
.fields = &[_]std.builtin.Type.UnionField{
.{
.name = "field",
- .field_type = T,
+ .type = T,
.alignment = @alignOf(T),
},
},
diff --git a/test/behavior/bugs/3742.zig b/test/behavior/bugs/3742.zig
index ab5ee92023..a984f0d8e4 100644
--- a/test/behavior/bugs/3742.zig
+++ b/test/behavior/bugs/3742.zig
@@ -39,5 +39,7 @@ test "fixed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and
+ builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) return error.SkipZigTest;
ArgSerializer.serializeCommand(GET.init("banana"));
}
diff --git a/test/behavior/bugs/6456.zig b/test/behavior/bugs/6456.zig
index a47f0c0d6c..03c687232f 100644
--- a/test/behavior/bugs/6456.zig
+++ b/test/behavior/bugs/6456.zig
@@ -23,7 +23,7 @@ test "issue 6456" {
fields = fields ++ &[_]StructField{StructField{
.alignment = 0,
.name = name,
- .field_type = usize,
+ .type = usize,
.default_value = &@as(?usize, null),
.is_comptime = false,
}};
diff --git a/test/behavior/byval_arg_var.zig b/test/behavior/byval_arg_var.zig
index 3c3f0f1165..476d0d2e4e 100644
--- a/test/behavior/byval_arg_var.zig
+++ b/test/behavior/byval_arg_var.zig
@@ -5,7 +5,6 @@ var result: []const u8 = "wrong";
test "pass string literal byvalue to a generic var param" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
start();
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 4d8f22d15f..4cdd54584f 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -107,7 +107,6 @@ test "result location of function call argument through runtime condition and st
}
test "function call with 40 arguments" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -344,3 +343,71 @@ test "inline call doesn't re-evaluate non generic struct" {
try @call(.always_inline, S.foo, ArgTuple{.{ .a = 123, .b = 45 }});
comptime try @call(.always_inline, S.foo, ArgTuple{.{ .a = 123, .b = 45 }});
}
+
+test "Enum constructed by @Type passed as generic argument" {
+ const S = struct {
+ const E = std.meta.FieldEnum(struct {
+ prev_pos: bool,
+ pos: bool,
+ vel: bool,
+ damp_vel: bool,
+ acc: bool,
+ rgba: bool,
+ prev_scale: bool,
+ scale: bool,
+ prev_rotation: bool,
+ rotation: bool,
+ angular_vel: bool,
+ alive: bool,
+ });
+ fn foo(comptime a: E, b: u32) !void {
+ try expect(@enumToInt(a) == b);
+ }
+ };
+ inline for (@typeInfo(S.E).Enum.fields) |_, i| {
+ try S.foo(@intToEnum(S.E, i), i);
+ }
+}
+
+test "generic function with generic function parameter" {
+ const S = struct {
+ fn f(comptime a: fn (anytype) anyerror!void, b: anytype) anyerror!void {
+ try a(b);
+ }
+ fn g(a: anytype) anyerror!void {
+ try expect(a == 123);
+ }
+ };
+ try S.f(S.g, 123);
+}
+
+test "recursive inline call with comptime known argument" {
+ const S = struct {
+ inline fn foo(x: i32) i32 {
+ if (x <= 0) {
+ return 0;
+ } else {
+ return x * 2 + foo(x - 1);
+ }
+ }
+ };
+
+ try expect(S.foo(4) == 20);
+}
+
+test "inline while with @call" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn inc(a: *u32) void {
+ a.* += 1;
+ }
+ };
+ var a: u32 = 0;
+ comptime var i = 0;
+ inline while (i < 10) : (i += 1) {
+ @call(.auto, S.inc, .{&a});
+ }
+ try expect(a == 10);
+}
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 1822922ec2..7c0746bfcd 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -1495,3 +1495,52 @@ test "cast typed undefined to int" {
_ = b;
}
}
+
+test "implicit cast from [:0]T to [*c]T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ var a: [:0]const u8 = "foo";
+ var b: [*c]const u8 = a;
+ var c = std.mem.span(b);
+ try expect(c.len == a.len);
+ try expect(c.ptr == a.ptr);
+}
+
+test "bitcast packed struct with u0" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = packed struct(u2) { a: u0, b: u2 };
+ const s = @bitCast(S, @as(u2, 2));
+ try expect(s.a == 0);
+ try expect(s.b == 2);
+ const i = @bitCast(u2, s);
+ try expect(i == 2);
+}
+
+test "optional pointer coerced to optional allowzero pointer" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ var p: ?*u32 = undefined;
+ var q: ?*allowzero u32 = undefined;
+ p = @intToPtr(*u32, 4);
+ q = p;
+ try expect(@ptrToInt(q.?) == 4);
+}
+
+test "ptrToInt on const inside comptime block" {
+ var a = comptime blk: {
+ const b: u8 = 1;
+ const c = @ptrToInt(&b);
+ break :blk c;
+ };
+ try expect(@intToPtr(*const u8, a).* == 1);
+}
+
+test "single item pointer to pointer to array to slice" {
+ var x: i32 = 1234;
+ try expect(@as([]const i32, @as(*[1]i32, &x))[0] == 1234);
+ const z1 = @as([]const i32, @as(*[1]i32, &x));
+ try expect(z1[0] == 1234);
+}
diff --git a/test/behavior/empty_file_level_struct.zig b/test/behavior/empty_file_level_struct.zig
new file mode 100644
index 0000000000..86f0f2b3c7
--- /dev/null
+++ b/test/behavior/empty_file_level_struct.zig
@@ -0,0 +1 @@
+struct {}
diff --git a/test/behavior/empty_file_level_union.zig b/test/behavior/empty_file_level_union.zig
new file mode 100644
index 0000000000..0d24797ffb
--- /dev/null
+++ b/test/behavior/empty_file_level_union.zig
@@ -0,0 +1 @@
+union {}
diff --git a/test/behavior/empty_tuple_fields.zig b/test/behavior/empty_tuple_fields.zig
new file mode 100644
index 0000000000..7309dc9b3e
--- /dev/null
+++ b/test/behavior/empty_tuple_fields.zig
@@ -0,0 +1,26 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+test "empty file level struct" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const T = @import("empty_file_level_struct.zig");
+ const info = @typeInfo(T);
+ try std.testing.expectEqual(@as(usize, 1), info.Struct.fields.len);
+ try std.testing.expectEqualStrings("0", info.Struct.fields[0].name);
+ try std.testing.expect(@typeInfo(info.Struct.fields[0].type) == .Struct);
+}
+
+test "empty file level union" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const T = @import("empty_file_level_union.zig");
+ const info = @typeInfo(T);
+ try std.testing.expectEqual(@as(usize, 1), info.Struct.fields.len);
+ try std.testing.expectEqualStrings("0", info.Struct.fields[0].name);
+ try std.testing.expect(@typeInfo(info.Struct.fields[0].type) == .Union);
+}
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index ae1445dd9e..b2a6cc5a50 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -29,7 +29,6 @@ fn shouldBeNotEqual(a: anyerror, b: anyerror) void {
}
test "error binary operator" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = errBinaryOperatorG(true) catch 3;
@@ -61,14 +60,12 @@ pub fn baz() anyerror!i32 {
}
test "error wrapping" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect((baz() catch unreachable) == 15);
}
test "unwrap simple value from error" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const i = unwrapSimpleValueFromErrorDo() catch unreachable;
@@ -79,7 +76,6 @@ fn unwrapSimpleValueFromErrorDo() anyerror!isize {
}
test "error return in assignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
doErrReturnInAssignment() catch unreachable;
@@ -102,7 +98,6 @@ test "syntax: optional operator in front of error union operator" {
test "widen cast integer payload of error union function call" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -128,7 +123,6 @@ test "debug info for optional error set" {
}
test "implicit cast to optional to error union to return result loc" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -151,7 +145,6 @@ test "implicit cast to optional to error union to return result loc" {
test "fn returning empty error set can be passed as fn returning any error" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
entry();
comptime entry();
@@ -159,7 +152,6 @@ test "fn returning empty error set can be passed as fn returning any error" {
test "fn returning empty error set can be passed as fn returning any error - pointer" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
entryPtr();
comptime entryPtr();
@@ -228,7 +220,6 @@ fn testErrorSetType() !void {
test "explicit error set cast" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testExplicitErrorSetCast(Set1.A);
@@ -248,7 +239,6 @@ fn testExplicitErrorSetCast(set1: Set1) !void {
test "comptime test error for empty error set" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testComptimeTestErrorEmptySet(1234);
@@ -266,7 +256,6 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) !void {
test "comptime err to int of error set with only 1 possible value" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
@@ -287,8 +276,28 @@ test "inferred empty error set comptime catch" {
S.foo() catch @compileError("fail");
}
+test "error inference with an empty set" {
+ const S = struct {
+ const Struct = struct {
+ pub fn func() (error{})!usize {
+ return 0;
+ }
+ };
+
+ fn AnotherStruct(comptime SubStruct: type) type {
+ return struct {
+ fn anotherFunc() !void {
+ try expect(0 == (try SubStruct.func()));
+ }
+ };
+ }
+ };
+
+ const GeneratedStruct = S.AnotherStruct(S.Struct);
+ try GeneratedStruct.anotherFunc();
+}
+
test "error union peer type resolution" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testErrorUnionPeerTypeResolution(1);
@@ -320,7 +329,6 @@ fn quux_1() !i32 {
}
test "error: Zero sized error set returned with value payload crash" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
_ = try foo3(0);
@@ -333,7 +341,6 @@ fn foo3(b: usize) Error!usize {
}
test "error: Infer error set from literals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
_ = nullLiteral("n") catch |err| handleErrors(err);
@@ -480,7 +487,6 @@ test "nested catch" {
test "function pointer with return type that is error union with payload which is pointer of parent struct" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -611,7 +617,6 @@ test "error set equality" {
}
test "inferred error set equality" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -707,7 +712,6 @@ test "ret_ptr doesn't cause own inferred error set to be resolved" {
}
test "simple else prong allowed even when all errors handled" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -824,7 +828,6 @@ test "alignment of wrapping an error union payload" {
}
test "compare error union and error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: anyerror = error.Foo;
@@ -881,3 +884,15 @@ test "field access of anyerror results in smaller error set" {
try expect(@TypeOf(E2.A) == E2);
try expect(@TypeOf(@field(anyerror, "NotFound")) == error{NotFound});
}
+
+test "optional error union return type" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn foo() ?anyerror!u32 {
+ var x: u32 = 1234;
+ return @as(anyerror!u32, x);
+ }
+ };
+ try expect(1234 == try S.foo().?);
+}
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 97c3a85bbb..2a1f2b7155 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -489,18 +489,11 @@ test "comptime bitwise operators" {
test "comptime shlWithOverflow" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- const ct_shifted: u64 = comptime amt: {
- var amt = @as(u64, 0);
- _ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt);
- break :amt amt;
- };
-
- const rt_shifted: u64 = amt: {
- var amt = @as(u64, 0);
- _ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt);
- break :amt amt;
- };
+ const ct_shifted = @shlWithOverflow(~@as(u64, 0), 16)[0];
+ var a = ~@as(u64, 0);
+ const rt_shifted = @shlWithOverflow(a, 16)[0];
try expect(ct_shifted == rt_shifted);
}
@@ -1554,3 +1547,112 @@ test "comptime function turns function value to function pointer" {
};
comptime try expect(S.foo[0] == &S.Nil);
}
+
+test "container level const and var have unique addresses" {
+ const S = struct {
+ x: i32,
+ y: i32,
+ const c = @This(){ .x = 1, .y = 1 };
+ var v: @This() = c;
+ };
+ var p = &S.c;
+ try std.testing.expect(p.x == S.c.x);
+ S.v.x = 2;
+ try std.testing.expect(p.x == S.c.x);
+}
+
+test "break from block results in type" {
+ const S = struct {
+ fn NewType(comptime T: type) type {
+ const Padded = blk: {
+ if (@sizeOf(T) <= @sizeOf(usize)) break :blk void;
+ break :blk T;
+ };
+
+ return Padded;
+ }
+ };
+ const T = S.NewType(usize);
+ try expect(T == void);
+}
+
+test "struct in comptime false branch is not evaluated" {
+ const S = struct {
+ const comptime_const = 2;
+ fn some(comptime V: type) type {
+ return switch (comptime_const) {
+ 3 => struct { a: V.foo },
+ 2 => V,
+ else => unreachable,
+ };
+ }
+ };
+ try expect(S.some(u32) == u32);
+}
+
+test "result of nested switch assigned to variable" {
+ var zds: u32 = 0;
+ zds = switch (zds) {
+ 0 => switch (zds) {
+ 0...0 => 1234,
+ 1...1 => zds,
+ 2 => zds,
+ else => return,
+ },
+ else => zds,
+ };
+ try expect(zds == 1234);
+}
+
+test "inline for loop of functions returning error unions" {
+ const T1 = struct {
+ fn v() error{}!usize {
+ return 1;
+ }
+ };
+ const T2 = struct {
+ fn v() error{Error}!usize {
+ return 2;
+ }
+ };
+ var a: usize = 0;
+ inline for (.{ T1, T2 }) |T| {
+ a += try T.v();
+ }
+ try expect(a == 3);
+}
+
+test "if inside a switch" {
+ var condition = true;
+ var wave_type: u32 = 0;
+ var sample: i32 = switch (wave_type) {
+ 0 => if (condition) 2 else 3,
+ 1 => 100,
+ 2 => 200,
+ 3 => 300,
+ else => unreachable,
+ };
+ try expect(sample == 2);
+}
+
+test "function has correct return type when previous return is casted to smaller type" {
+ const S = struct {
+ fn foo(b: bool) u16 {
+ if (b) return @as(u8, 0xFF);
+ return 0xFFFF;
+ }
+ };
+ try expect(S.foo(true) == 0xFF);
+}
+
+test "early exit in container level const" {
+ const S = struct {
+ const value = blk: {
+ if (true) {
+ break :blk @as(u32, 1);
+ }
+ break :blk @as(u32, 0);
+ };
+ };
+ try expect(S.value == 1);
+}
diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig
index 4b1bacedf7..14a2362f5d 100644
--- a/test/behavior/field_parent_ptr.zig
+++ b/test/behavior/field_parent_ptr.zig
@@ -2,7 +2,6 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "@fieldParentPtr non-first field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtr(&foo.c);
@@ -10,7 +9,6 @@ test "@fieldParentPtr non-first field" {
}
test "@fieldParentPtr first field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtrFirst(&foo.a);
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 16add6aee0..8dca94a656 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -145,7 +145,6 @@ fn fnWithUnreachable() noreturn {
test "extern struct with stdcallcc fn pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = extern struct {
ptr: *const fn () callconv(if (builtin.target.cpu.arch == .x86) .Stdcall else .C) i32,
@@ -196,7 +195,6 @@ fn addPointCoords(pt: Point) i32 {
}
test "pass by non-copying value through var arg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect((try addPointCoordsVar(Point{ .x = 1, .y = 2 })) == 3);
@@ -292,7 +290,6 @@ fn voidFun(a: i32, b: void, c: i32, d: void) !void {
}
test "call function with empty string" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
acceptsString("");
@@ -410,7 +407,6 @@ test "function with inferred error set but returning no error" {
}
test "import passed byref to function in return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -445,7 +441,6 @@ test "implicit cast function to function ptr" {
test "method call with optional and error union first param" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -462,3 +457,63 @@ test "method call with optional and error union first param" {
try s.opt();
try s.errUnion();
}
+
+test "using @ptrCast on function pointers" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const A = struct { data: [4]u8 };
+
+ fn at(arr: *const A, index: usize) *const u8 {
+ return &arr.data[index];
+ }
+
+ fn run() !void {
+ const a = A{ .data = "abcd".* };
+ const casted_fn = @ptrCast(*const fn (*const anyopaque, usize) *const u8, &at);
+ const casted_impl = @ptrCast(*const anyopaque, &a);
+ const ptr = casted_fn(casted_impl, 2);
+ try expect(ptr.* == 'c');
+ }
+ };
+
+ try S.run();
+ // https://github.com/ziglang/zig/issues/2626
+ // try comptime S.run();
+}
+
+test "function returns function returning type" {
+ const S = struct {
+ fn a() fn () type {
+ return (struct {
+ fn b() type {
+ return u32;
+ }
+ }).b;
+ }
+ };
+ try expect(S.a()() == u32);
+}
+
+test "peer type resolution of inferred error set with non-void payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn openDataFile(mode: enum { read, write }) !u32 {
+ return switch (mode) {
+ .read => foo(),
+ .write => bar(),
+ };
+ }
+ fn foo() error{ a, b }!u32 {
+ return 1;
+ }
+ fn bar() error{ c, d }!u32 {
+ return 2;
+ }
+ };
+ try expect(try S.openDataFile(.read) == 1);
+}
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 5f13e660bd..f48000a871 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -227,3 +227,25 @@ test "else continue outer for" {
} else continue;
}
}
+
+test "for loop with else branch" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ {
+ var x = [_]u32{ 1, 2 };
+ const q = for (x) |y| {
+ if ((y & 1) != 0) continue;
+ break y * 2;
+ } else @as(u32, 1);
+ try expect(q == 4);
+ }
+ {
+ var x = [_]u32{ 1, 2 };
+ const q = for (x) |y| {
+ if ((y & 1) != 0) continue;
+ break y * 2;
+ } else @panic("");
+ try expect(q == 4);
+ }
+}
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index dafbfbafe8..17194fc445 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -273,7 +273,6 @@ test "generic function instantiation turns into comptime call" {
var enumFields: [1]std.builtin.Type.EnumField = .{.{ .name = "A", .value = 0 }};
return @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u0,
.fields = &enumFields,
.decls = &.{},
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index 769306f741..ac11a6585d 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -44,7 +44,6 @@ var global_with_val: anyerror!u32 = 0;
var global_with_err: anyerror!u32 = error.SomeError;
test "unwrap mutable global var" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (global_with_val) |v| {
@@ -81,7 +80,6 @@ test "const result loc, runtime if cond, else unreachable" {
test "if copies its payload" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig
index 7da6f7954e..f02795cebe 100644
--- a/test/behavior/int128.zig
+++ b/test/behavior/int128.zig
@@ -64,11 +64,23 @@ test "int128" {
}
test "truncate int128" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- var buff: u128 = maxInt(u128);
- try expect(@truncate(u64, buff) == maxInt(u64));
+ {
+ var buff: u128 = maxInt(u128);
+ try expect(@truncate(u64, buff) == maxInt(u64));
+ try expect(@truncate(u90, buff) == maxInt(u90));
+ try expect(@truncate(u128, buff) == maxInt(u128));
+ }
+
+ {
+ var buff: i128 = maxInt(i128);
+ try expect(@truncate(i64, buff) == -1);
+ try expect(@truncate(i90, buff) == -1);
+ try expect(@truncate(i128, buff) == maxInt(i128));
+ }
}
diff --git a/test/behavior/int_comparison_elision.zig b/test/behavior/int_comparison_elision.zig
new file mode 100644
index 0000000000..5e13e00e83
--- /dev/null
+++ b/test/behavior/int_comparison_elision.zig
@@ -0,0 +1,108 @@
+const std = @import("std");
+const minInt = std.math.minInt;
+const maxInt = std.math.maxInt;
+const builtin = @import("builtin");
+
+test "int comparison elision" {
+ testIntEdges(u0);
+ testIntEdges(i0);
+ testIntEdges(u1);
+ testIntEdges(i1);
+ testIntEdges(u4);
+ testIntEdges(i4);
+
+ // TODO: support int types > 128 bits wide in other backends
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ // TODO: panic: integer overflow with int types > 65528 bits wide
+ // TODO: LLVM generates too many parameters for wasmtime when splitting up int > 64000 bits wide
+ testIntEdges(u64000);
+ testIntEdges(i64000);
+}
+
+// All comparisons in this test have a guaranteed result,
+// so one branch of each 'if' should never be analyzed.
+fn testIntEdges(comptime T: type) void {
+ const min = minInt(T);
+ const max = maxInt(T);
+
+ var runtime_val: T = undefined;
+
+ if (min > runtime_val) @compileError("analyzed impossible branch");
+ if (min <= runtime_val) {} else @compileError("analyzed impossible branch");
+ if (runtime_val < min) @compileError("analyzed impossible branch");
+ if (runtime_val >= min) {} else @compileError("analyzed impossible branch");
+
+ if (min - 1 > runtime_val) @compileError("analyzed impossible branch");
+ if (min - 1 >= runtime_val) @compileError("analyzed impossible branch");
+ if (min - 1 < runtime_val) {} else @compileError("analyzed impossible branch");
+ if (min - 1 <= runtime_val) {} else @compileError("analyzed impossible branch");
+ if (min - 1 == runtime_val) @compileError("analyzed impossible branch");
+ if (min - 1 != runtime_val) {} else @compileError("analyzed impossible branch");
+ if (runtime_val < min - 1) @compileError("analyzed impossible branch");
+ if (runtime_val <= min - 1) @compileError("analyzed impossible branch");
+ if (runtime_val > min - 1) {} else @compileError("analyzed impossible branch");
+ if (runtime_val >= min - 1) {} else @compileError("analyzed impossible branch");
+ if (runtime_val == min - 1) @compileError("analyzed impossible branch");
+ if (runtime_val != min - 1) {} else @compileError("analyzed impossible branch");
+
+ if (max >= runtime_val) {} else @compileError("analyzed impossible branch");
+ if (max < runtime_val) @compileError("analyzed impossible branch");
+ if (runtime_val <= max) {} else @compileError("analyzed impossible branch");
+ if (runtime_val > max) @compileError("analyzed impossible branch");
+
+ if (max + 1 > runtime_val) {} else @compileError("analyzed impossible branch");
+ if (max + 1 >= runtime_val) {} else @compileError("analyzed impossible branch");
+ if (max + 1 < runtime_val) @compileError("analyzed impossible branch");
+ if (max + 1 <= runtime_val) @compileError("analyzed impossible branch");
+ if (max + 1 == runtime_val) @compileError("analyzed impossible branch");
+ if (max + 1 != runtime_val) {} else @compileError("analyzed impossible branch");
+ if (runtime_val < max + 1) {} else @compileError("analyzed impossible branch");
+ if (runtime_val <= max + 1) {} else @compileError("analyzed impossible branch");
+ if (runtime_val > max + 1) @compileError("analyzed impossible branch");
+ if (runtime_val >= max + 1) @compileError("analyzed impossible branch");
+ if (runtime_val == max + 1) @compileError("analyzed impossible branch");
+ if (runtime_val != max + 1) {} else @compileError("analyzed impossible branch");
+
+ const undef_const: T = undefined;
+
+ if (min > undef_const) @compileError("analyzed impossible branch");
+ if (min <= undef_const) {} else @compileError("analyzed impossible branch");
+ if (undef_const < min) @compileError("analyzed impossible branch");
+ if (undef_const >= min) {} else @compileError("analyzed impossible branch");
+
+ if (min - 1 > undef_const) @compileError("analyzed impossible branch");
+ if (min - 1 >= undef_const) @compileError("analyzed impossible branch");
+ if (min - 1 < undef_const) {} else @compileError("analyzed impossible branch");
+ if (min - 1 <= undef_const) {} else @compileError("analyzed impossible branch");
+ if (min - 1 == undef_const) @compileError("analyzed impossible branch");
+ if (min - 1 != undef_const) {} else @compileError("analyzed impossible branch");
+ if (undef_const < min - 1) @compileError("analyzed impossible branch");
+ if (undef_const <= min - 1) @compileError("analyzed impossible branch");
+ if (undef_const > min - 1) {} else @compileError("analyzed impossible branch");
+ if (undef_const >= min - 1) {} else @compileError("analyzed impossible branch");
+ if (undef_const == min - 1) @compileError("analyzed impossible branch");
+ if (undef_const != min - 1) {} else @compileError("analyzed impossible branch");
+
+ if (max >= undef_const) {} else @compileError("analyzed impossible branch");
+ if (max < undef_const) @compileError("analyzed impossible branch");
+ if (undef_const <= max) {} else @compileError("analyzed impossible branch");
+ if (undef_const > max) @compileError("analyzed impossible branch");
+
+ if (max + 1 > undef_const) {} else @compileError("analyzed impossible branch");
+ if (max + 1 >= undef_const) {} else @compileError("analyzed impossible branch");
+ if (max + 1 < undef_const) @compileError("analyzed impossible branch");
+ if (max + 1 <= undef_const) @compileError("analyzed impossible branch");
+ if (max + 1 == undef_const) @compileError("analyzed impossible branch");
+ if (max + 1 != undef_const) {} else @compileError("analyzed impossible branch");
+ if (undef_const < max + 1) {} else @compileError("analyzed impossible branch");
+ if (undef_const <= max + 1) {} else @compileError("analyzed impossible branch");
+ if (undef_const > max + 1) @compileError("analyzed impossible branch");
+ if (undef_const >= max + 1) @compileError("analyzed impossible branch");
+ if (undef_const == max + 1) @compileError("analyzed impossible branch");
+ if (undef_const != max + 1) {} else @compileError("analyzed impossible branch");
+}
diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig
new file mode 100644
index 0000000000..4ca4cf716e
--- /dev/null
+++ b/test/behavior/lower_strlit_to_vector.zig
@@ -0,0 +1,18 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+test "strlit to vector" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+ const strlit = "0123456789abcdef0123456789ABCDEF";
+ const vec_from_strlit: @Vector(32, u8) = strlit.*;
+ const arr_from_vec = @as([32]u8, vec_from_strlit);
+ for (strlit) |c, i|
+ try std.testing.expect(c == arr_from_vec[i]);
+ try std.testing.expectEqualSlices(u8, strlit, &arr_from_vec);
+}
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index d819743390..2257a116b7 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -9,7 +9,6 @@ const mem = std.mem;
const math = std.math;
test "assignment operators" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -167,6 +166,12 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // This regressed with LLVM 14:
+ // https://github.com/ziglang/zig/issues/12013
+ return error.SkipZigTest;
+ }
+
try testCtzVectors();
comptime try testCtzVectors();
}
@@ -372,6 +377,28 @@ fn testBinaryNot(x: u16) !void {
try expect(~x == 0b0101010101010101);
}
+test "binary not 128-bit" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+ try expect(comptime x: {
+ break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa;
+ });
+ try expect(comptime x: {
+ break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa));
+ });
+
+ try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa);
+ try testBinaryNot128(i128, @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)));
+}
+
+fn testBinaryNot128(comptime Type: type, x: Type) !void {
+ try expect(~x == @as(Type, 0x55555555_55555555_55555555_55555555));
+}
+
test "division" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -532,9 +559,22 @@ fn testUnsignedNegationWrappingEval(x: u16) !void {
try expect(neg == maxInt(u16));
}
-test "unsigned 64-bit division" {
+test "negation wrapping" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ try expectEqual(@as(u1, 1), negateWrap(u1, 1));
+}
+
+fn negateWrap(comptime T: type, x: T) T {
+ // This is specifically testing a safety-checked add, so
+ // special case minInt(T) which would overflow otherwise.
+ return if (x == minInt(T)) minInt(T) else ~x + 1;
+}
+
+test "unsigned 64-bit division" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try test_u64_div();
@@ -614,49 +654,74 @@ test "128-bit multiplication" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- var a: i128 = 3;
- var b: i128 = 2;
- var c = a * b;
- try expect(c == 6);
+ {
+ var a: i128 = 3;
+ var b: i128 = 2;
+ var c = a * b;
+ try expect(c == 6);
+
+ a = -3;
+ b = 2;
+ c = a * b;
+ try expect(c == -6);
+ }
+
+ {
+ var a: u128 = 0xffffffffffffffff;
+ var b: u128 = 100;
+ var c = a * b;
+ try expect(c == 0x63ffffffffffffff9c);
+ }
}
test "@addWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
- var result: u8 = undefined;
- try expect(@addWithOverflow(u8, 250, 100, &result));
- try expect(result == 94);
- try expect(!@addWithOverflow(u8, 100, 150, &result));
- try expect(result == 250);
-
+ var a: u8 = 250;
+ const ov = @addWithOverflow(a, 100);
+ try expect(ov[0] == 94);
+ try expect(ov[1] == 1);
+ }
+ {
+ var a: u8 = 100;
+ const ov = @addWithOverflow(a, 150);
+ try expect(ov[0] == 250);
+ try expect(ov[1] == 0);
+ }
+ {
var a: u8 = 200;
var b: u8 = 99;
- try expect(@addWithOverflow(u8, a, b, &result));
- try expect(result == 43);
+ var ov = @addWithOverflow(a, b);
+ try expect(ov[0] == 43);
+ try expect(ov[1] == 1);
b = 55;
- try expect(!@addWithOverflow(u8, a, b, &result));
- try expect(result == 255);
+ ov = @addWithOverflow(a, b);
+ try expect(ov[0] == 255);
+ try expect(ov[1] == 0);
}
{
var a: usize = 6;
var b: usize = 6;
- var res: usize = undefined;
- try expect(!@addWithOverflow(usize, a, b, &res));
- try expect(res == 12);
+ const ov = @addWithOverflow(a, b);
+ try expect(ov[0] == 12);
+ try expect(ov[1] == 0);
}
{
var a: isize = -6;
var b: isize = -6;
- var res: isize = undefined;
- try expect(!@addWithOverflow(isize, a, b, &res));
- try expect(res == -12);
+ const ov = @addWithOverflow(a, b);
+ try expect(ov[0] == -12);
+ try expect(ov[1] == 0);
}
}
test "small int addition" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u2 = 0;
@@ -671,180 +736,206 @@ test "small int addition" {
x += 1;
try expect(x == 3);
- var result: @TypeOf(x) = 3;
- try expect(@addWithOverflow(@TypeOf(x), x, 1, &result));
-
- try expect(result == 0);
+ const ov = @addWithOverflow(x, 1);
+ try expect(ov[0] == 0);
+ try expect(ov[1] == 1);
}
test "basic @mulWithOverflow" {
- var result: u8 = undefined;
- try expect(@mulWithOverflow(u8, 86, 3, &result));
- try expect(result == 2);
- try expect(!@mulWithOverflow(u8, 85, 3, &result));
- try expect(result == 255);
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ {
+ var a: u8 = 86;
+ const ov = @mulWithOverflow(a, 3);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
+ }
+ {
+ var a: u8 = 85;
+ const ov = @mulWithOverflow(a, 3);
+ try expect(ov[0] == 255);
+ try expect(ov[1] == 0);
+ }
var a: u8 = 123;
var b: u8 = 2;
- try expect(!@mulWithOverflow(u8, a, b, &result));
- try expect(result == 246);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 246);
+ try expect(ov[1] == 0);
b = 4;
- try expect(@mulWithOverflow(u8, a, b, &result));
- try expect(result == 236);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 236);
+ try expect(ov[1] == 1);
}
-// TODO migrate to this for all backends once they handle more cases
test "extensive @mulWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
{
var a: u5 = 3;
var b: u5 = 10;
- var res: u5 = undefined;
- try expect(!@mulWithOverflow(u5, a, b, &res));
- try expect(res == 30);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 30);
+ try expect(ov[1] == 0);
b = 11;
- try expect(@mulWithOverflow(u5, a, b, &res));
- try expect(res == 1);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 1);
+ try expect(ov[1] == 1);
}
{
var a: i5 = 3;
var b: i5 = -5;
- var res: i5 = undefined;
- try expect(!@mulWithOverflow(i5, a, b, &res));
- try expect(res == -15);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -15);
+ try expect(ov[1] == 0);
b = -6;
- try expect(@mulWithOverflow(i5, a, b, &res));
- try expect(res == 14);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 14);
+ try expect(ov[1] == 1);
}
{
var a: u8 = 3;
var b: u8 = 85;
- var res: u8 = undefined;
- try expect(!@mulWithOverflow(u8, a, b, &res));
- try expect(res == 255);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 255);
+ try expect(ov[1] == 0);
b = 86;
- try expect(@mulWithOverflow(u8, a, b, &res));
- try expect(res == 2);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
}
{
var a: i8 = 3;
var b: i8 = -42;
- var res: i8 = undefined;
- try expect(!@mulWithOverflow(i8, a, b, &res));
- try expect(res == -126);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -126);
+ try expect(ov[1] == 0);
b = -43;
- try expect(@mulWithOverflow(i8, a, b, &res));
- try expect(res == 127);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 127);
+ try expect(ov[1] == 1);
}
{
var a: u14 = 3;
var b: u14 = 0x1555;
- var res: u14 = undefined;
- try expect(!@mulWithOverflow(u14, a, b, &res));
- try expect(res == 0x3fff);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x3fff);
+ try expect(ov[1] == 0);
b = 0x1556;
- try expect(@mulWithOverflow(u14, a, b, &res));
- try expect(res == 2);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
}
{
var a: i14 = 3;
var b: i14 = -0xaaa;
- var res: i14 = undefined;
- try expect(!@mulWithOverflow(i14, a, b, &res));
- try expect(res == -0x1ffe);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -0x1ffe);
+ try expect(ov[1] == 0);
b = -0xaab;
- try expect(@mulWithOverflow(i14, a, b, &res));
- try expect(res == 0x1fff);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x1fff);
}
{
var a: u16 = 3;
var b: u16 = 0x5555;
- var res: u16 = undefined;
- try expect(!@mulWithOverflow(u16, a, b, &res));
- try expect(res == 0xffff);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0xffff);
+ try expect(ov[1] == 0);
b = 0x5556;
- try expect(@mulWithOverflow(u16, a, b, &res));
- try expect(res == 2);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
}
{
var a: i16 = 3;
var b: i16 = -0x2aaa;
- var res: i16 = undefined;
- try expect(!@mulWithOverflow(i16, a, b, &res));
- try expect(res == -0x7ffe);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -0x7ffe);
+ try expect(ov[1] == 0);
b = -0x2aab;
- try expect(@mulWithOverflow(i16, a, b, &res));
- try expect(res == 0x7fff);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x7fff);
+ try expect(ov[1] == 1);
}
{
var a: u30 = 3;
var b: u30 = 0x15555555;
- var res: u30 = undefined;
- try expect(!@mulWithOverflow(u30, a, b, &res));
- try expect(res == 0x3fffffff);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x3fffffff);
+ try expect(ov[1] == 0);
b = 0x15555556;
- try expect(@mulWithOverflow(u30, a, b, &res));
- try expect(res == 2);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
}
{
var a: i30 = 3;
var b: i30 = -0xaaaaaaa;
- var res: i30 = undefined;
- try expect(!@mulWithOverflow(i30, a, b, &res));
- try expect(res == -0x1ffffffe);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -0x1ffffffe);
+ try expect(ov[1] == 0);
b = -0xaaaaaab;
- try expect(@mulWithOverflow(i30, a, b, &res));
- try expect(res == 0x1fffffff);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x1fffffff);
+ try expect(ov[1] == 1);
}
{
var a: u32 = 3;
var b: u32 = 0x55555555;
- var res: u32 = undefined;
- try expect(!@mulWithOverflow(u32, a, b, &res));
- try expect(res == 0xffffffff);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0xffffffff);
+ try expect(ov[1] == 0);
b = 0x55555556;
- try expect(@mulWithOverflow(u32, a, b, &res));
- try expect(res == 2);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
}
{
var a: i32 = 3;
var b: i32 = -0x2aaaaaaa;
- var res: i32 = undefined;
- try expect(!@mulWithOverflow(i32, a, b, &res));
- try expect(res == -0x7ffffffe);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -0x7ffffffe);
+ try expect(ov[1] == 0);
b = -0x2aaaaaab;
- try expect(@mulWithOverflow(i32, a, b, &res));
- try expect(res == 0x7fffffff);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x7fffffff);
+ try expect(ov[1] == 1);
}
}
test "@mulWithOverflow bitsize > 32" {
+ // aarch64 fails on a release build of the compiler.
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -852,140 +943,181 @@ test "@mulWithOverflow bitsize > 32" {
{
var a: u62 = 3;
var b: u62 = 0x1555555555555555;
- var res: u62 = undefined;
- try expect(!@mulWithOverflow(u62, a, b, &res));
- try expect(res == 0x3fffffffffffffff);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x3fffffffffffffff);
+ try expect(ov[1] == 0);
b = 0x1555555555555556;
- try expect(@mulWithOverflow(u62, a, b, &res));
- try expect(res == 2);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
}
{
var a: i62 = 3;
var b: i62 = -0xaaaaaaaaaaaaaaa;
- var res: i62 = undefined;
- try expect(!@mulWithOverflow(i62, a, b, &res));
- try expect(res == -0x1ffffffffffffffe);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -0x1ffffffffffffffe);
+ try expect(ov[1] == 0);
b = -0xaaaaaaaaaaaaaab;
- try expect(@mulWithOverflow(i62, a, b, &res));
- try expect(res == 0x1fffffffffffffff);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x1fffffffffffffff);
+ try expect(ov[1] == 1);
}
{
var a: u64 = 3;
var b: u64 = 0x5555555555555555;
- var res: u64 = undefined;
- try expect(!@mulWithOverflow(u64, a, b, &res));
- try expect(res == 0xffffffffffffffff);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0xffffffffffffffff);
+ try expect(ov[1] == 0);
b = 0x5555555555555556;
- try expect(@mulWithOverflow(u64, a, b, &res));
- try expect(res == 2);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 2);
+ try expect(ov[1] == 1);
}
{
var a: i64 = 3;
var b: i64 = -0x2aaaaaaaaaaaaaaa;
- var res: i64 = undefined;
- try expect(!@mulWithOverflow(i64, a, b, &res));
- try expect(res == -0x7ffffffffffffffe);
+ var ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == -0x7ffffffffffffffe);
+ try expect(ov[1] == 0);
b = -0x2aaaaaaaaaaaaaab;
- try expect(@mulWithOverflow(i64, a, b, &res));
- try expect(res == 0x7fffffffffffffff);
+ ov = @mulWithOverflow(a, b);
+ try expect(ov[0] == 0x7fffffffffffffff);
+ try expect(ov[1] == 1);
}
}
test "@subWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
- var result: u8 = undefined;
- try expect(@subWithOverflow(u8, 1, 2, &result));
- try expect(result == 255);
- try expect(!@subWithOverflow(u8, 1, 1, &result));
- try expect(result == 0);
+ var a: u8 = 1;
+ const ov = @subWithOverflow(a, 2);
+ try expect(ov[0] == 255);
+ try expect(ov[1] == 1);
+ }
+ {
+ var a: u8 = 1;
+ const ov = @subWithOverflow(a, 1);
+ try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
+ }
+ {
var a: u8 = 1;
var b: u8 = 2;
- try expect(@subWithOverflow(u8, a, b, &result));
- try expect(result == 255);
+ var ov = @subWithOverflow(a, b);
+ try expect(ov[0] == 255);
+ try expect(ov[1] == 1);
b = 1;
- try expect(!@subWithOverflow(u8, a, b, &result));
- try expect(result == 0);
+ ov = @subWithOverflow(a, b);
+ try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
}
{
var a: usize = 6;
var b: usize = 6;
- var res: usize = undefined;
- try expect(!@subWithOverflow(usize, a, b, &res));
- try expect(res == 0);
+ const ov = @subWithOverflow(a, b);
+ try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
}
{
var a: isize = -6;
var b: isize = -6;
- var res: isize = undefined;
- try expect(!@subWithOverflow(isize, a, b, &res));
- try expect(res == 0);
+ const ov = @subWithOverflow(a, b);
+ try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
}
}
test "@shlWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
{
- var result: u4 = undefined;
var a: u4 = 2;
var b: u2 = 1;
- try expect(!@shlWithOverflow(u4, a, b, &result));
- try expect(result == 4);
+ var ov = @shlWithOverflow(a, b);
+ try expect(ov[0] == 4);
+ try expect(ov[1] == 0);
b = 3;
- try expect(@shlWithOverflow(u4, a, b, &result));
- try expect(result == 0);
+ ov = @shlWithOverflow(a, b);
+ try expect(ov[0] == 0);
+ try expect(ov[1] == 1);
}
{
- var result: i9 = undefined;
var a: i9 = 127;
var b: u4 = 1;
- try expect(!@shlWithOverflow(i9, a, b, &result));
- try expect(result == 254);
+ var ov = @shlWithOverflow(a, b);
+ try expect(ov[0] == 254);
+ try expect(ov[1] == 0);
b = 2;
- try expect(@shlWithOverflow(i9, a, b, &result));
- try expect(result == -4);
+ ov = @shlWithOverflow(a, b);
+ try expect(ov[0] == -4);
+ try expect(ov[1] == 1);
}
{
- var result: u16 = undefined;
- try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result));
- try expect(result == 0b0111111111111000);
- try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result));
- try expect(result == 0b1011111111111100);
-
+ const ov = @shlWithOverflow(@as(u16, 0b0010111111111111), 3);
+ try expect(ov[0] == 0b0111111111111000);
+ try expect(ov[1] == 1);
+ }
+ {
+ const ov = @shlWithOverflow(@as(u16, 0b0010111111111111), 2);
+ try expect(ov[0] == 0b1011111111111100);
+ try expect(ov[1] == 0);
+ }
+ {
var a: u16 = 0b0000_0000_0000_0011;
var b: u4 = 15;
- try expect(@shlWithOverflow(u16, a, b, &result));
- try expect(result == 0b1000_0000_0000_0000);
+ var ov = @shlWithOverflow(a, b);
+ try expect(ov[0] == 0b1000_0000_0000_0000);
+ try expect(ov[1] == 1);
b = 14;
- try expect(!@shlWithOverflow(u16, a, b, &result));
- try expect(result == 0b1100_0000_0000_0000);
+ ov = @shlWithOverflow(a, b);
+ try expect(ov[0] == 0b1100_0000_0000_0000);
+ try expect(ov[1] == 0);
}
}
test "overflow arithmetic with u0 values" {
- var result: u0 = undefined;
- try expect(!@addWithOverflow(u0, 0, 0, &result));
- try expect(result == 0);
- try expect(!@subWithOverflow(u0, 0, 0, &result));
- try expect(result == 0);
- try expect(!@mulWithOverflow(u0, 0, 0, &result));
- try expect(result == 0);
- try expect(!@shlWithOverflow(u0, 0, 0, &result));
- try expect(result == 0);
+ {
+ var a: u0 = 0;
+ const ov = @addWithOverflow(a, 0);
+        try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
+ }
+ {
+ var a: u0 = 0;
+ const ov = @subWithOverflow(a, 0);
+        try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
+ }
+ {
+ var a: u0 = 0;
+ const ov = @mulWithOverflow(a, 0);
+        try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
+ }
+ {
+ var a: u0 = 0;
+ const ov = @shlWithOverflow(a, 0);
+        try expect(ov[0] == 0);
+ try expect(ov[1] == 0);
+ }
}
test "allow signed integer division/remainder when values are comptime-known and positive or exact" {
diff --git a/test/behavior/merge_error_sets.zig b/test/behavior/merge_error_sets.zig
index e9e7b8e505..4e6d9e4c45 100644
--- a/test/behavior/merge_error_sets.zig
+++ b/test/behavior/merge_error_sets.zig
@@ -12,7 +12,6 @@ fn foo() C!void {
}
test "merge error sets" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index 6fa32c47a7..223be69084 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -73,7 +73,6 @@ fn foo(x: ?i32) ?bool {
test "test null runtime" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testTestNullRuntime(null);
@@ -85,7 +84,6 @@ fn testTestNullRuntime(x: ?i32) !void {
test "optional void" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try optionalVoidImpl();
@@ -109,7 +107,6 @@ const Empty = struct {};
test "optional struct{}" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
_ = try optionalEmptyStructImpl();
@@ -135,7 +132,6 @@ test "null with default unwrap" {
}
test "optional pointer to 0 bit type null value at runtime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const EmptyStruct = struct {};
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 9821145658..3e91c6807c 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -448,3 +448,46 @@ test "Optional slice size is optimized" {
a = "hello";
try expectEqualStrings(a.?, "hello");
}
+
+test "peer type resolution in nested if expressions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ const Thing = struct { n: i32 };
+ var a = false;
+ var b = false;
+
+ var result1 = if (a)
+ Thing{ .n = 1 }
+ else
+ null;
+ try expect(result1 == null);
+ try expect(@TypeOf(result1) == ?Thing);
+
+ var result2 = if (a)
+ Thing{ .n = 0 }
+ else if (b)
+ Thing{ .n = 1 }
+ else
+ null;
+ try expect(result2 == null);
+ try expect(@TypeOf(result2) == ?Thing);
+}
+
+test "cast slice to const slice nested in error union and optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const S = struct {
+ fn inner() !?[]u8 {
+ return error.Foo;
+ }
+ fn outer() !?[]const u8 {
+ return inner();
+ }
+ };
+ try std.testing.expectError(error.Foo, S.outer());
+}
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index 392ebc23c8..3baaaee3d8 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -567,3 +567,35 @@ test "packed struct passed to callconv(.C) function" {
}, 5, 4, 3, 2, 1);
try expect(result);
}
+
+test "overaligned pointer to packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ const S = packed struct { a: u32, b: u32 };
+ var foo: S align(4) = .{ .a = 123, .b = 456 };
+ const ptr: *align(4) S = &foo;
+ switch (comptime builtin.cpu.arch.endian()) {
+ .Little => {
+ const ptr_to_b: *u32 = &ptr.b;
+ try expect(ptr_to_b.* == 456);
+ },
+ .Big => {
+ // Byte aligned packed struct field pointers have not been implemented yet.
+ const ptr_to_a: *align(4:0:8) u32 = &ptr.a;
+ try expect(ptr_to_a.* == 123);
+ },
+ }
+}
+
+test "packed struct initialized in bitcast" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ const T = packed struct { val: u8 };
+ var val: u8 = 123;
+ const t = @bitCast(u8, T{ .val = val });
+ try expect(t == val);
+}
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index a73ff161fa..36f70e1a77 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -66,7 +66,6 @@ test "initialize const optional C pointer to null" {
}
test "assigning integer to C pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: i32 = 0;
@@ -193,7 +192,6 @@ test "allowzero pointer and slice" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var ptr = @intToPtr([*]allowzero i32, 0);
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index 982a0b862f..9336d58641 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -4,8 +4,6 @@ const expect = std.testing.expect;
const native_endian = builtin.target.cpu.arch.endian();
test "reinterpret bytes as integer with nonzero offset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
try testReinterpretBytesAsInteger();
comptime try testReinterpretBytesAsInteger();
}
@@ -38,7 +36,6 @@ fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void {
}
test "reinterpret bytes inside auto-layout struct as integer with nonzero offset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testReinterpretStructWrappedBytesAsInteger();
@@ -173,7 +170,6 @@ test "lower reinterpreted comptime field ptr" {
}
test "reinterpret struct field at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const numNative = comptime Bytes.init(0x12345678);
@@ -203,9 +199,39 @@ test "comptime ptrcast keeps larger alignment" {
}
}
+test "ptrcast of const integer has the correct object size" {
+ const is_value = ~@intCast(isize, std.math.minInt(isize));
+ const is_bytes = @ptrCast([*]const u8, &is_value)[0..@sizeOf(isize)];
+ if (@sizeOf(isize) == 8) {
+ switch (native_endian) {
+ .Little => {
+ try expect(is_bytes[0] == 0xff);
+ try expect(is_bytes[1] == 0xff);
+ try expect(is_bytes[2] == 0xff);
+ try expect(is_bytes[3] == 0xff);
+
+ try expect(is_bytes[4] == 0xff);
+ try expect(is_bytes[5] == 0xff);
+ try expect(is_bytes[6] == 0xff);
+ try expect(is_bytes[7] == 0x7f);
+ },
+ .Big => {
+ try expect(is_bytes[0] == 0x7f);
+ try expect(is_bytes[1] == 0xff);
+ try expect(is_bytes[2] == 0xff);
+ try expect(is_bytes[3] == 0xff);
+
+ try expect(is_bytes[4] == 0xff);
+ try expect(is_bytes[5] == 0xff);
+ try expect(is_bytes[6] == 0xff);
+ try expect(is_bytes[7] == 0xff);
+ },
+ }
+ }
+}
+
test "implicit optional pointer to optional anyopaque pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var buf: [4]u8 = "aoeu".*;
@@ -231,3 +257,28 @@ test "@ptrCast slice to slice" {
try expect(buf[1] == 42);
try expect(alias.len == 4);
}
+
+test "comptime @ptrCast a subset of an array, then write through it" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+ comptime {
+ var buff: [16]u8 align(4) = undefined;
+ const len_bytes = @ptrCast(*u32, &buff);
+ len_bytes.* = 16;
+ std.mem.copy(u8, buff[4..], "abcdef");
+ }
+}
+
+test "@ptrCast undefined value at comptime" {
+ const S = struct {
+ fn transmute(comptime T: type, comptime U: type, value: T) U {
+ return @ptrCast(*const U, &value).*;
+ }
+ };
+ comptime {
+ var x = S.transmute([]u8, i32, undefined);
+ _ = x;
+ }
+}
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index c0246f1221..4c3f8ccad5 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -9,10 +9,10 @@ test "reflection: function return type, var args, and param types" {
const info = @typeInfo(@TypeOf(dummy)).Fn;
try expect(info.return_type.? == i32);
try expect(!info.is_var_args);
- try expect(info.args.len == 3);
- try expect(info.args[0].arg_type.? == bool);
- try expect(info.args[1].arg_type.? == i32);
- try expect(info.args[2].arg_type.? == f32);
+ try expect(info.params.len == 3);
+ try expect(info.params[0].type.? == bool);
+ try expect(info.params[1].type.? == i32);
+ try expect(info.params[2].type.? == f32);
}
}
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index b619400040..ef4487d9b9 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -75,7 +75,6 @@ const P = packed struct {
};
test "@offsetOf" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Packed structs have fixed memory layout
@@ -288,3 +287,8 @@ test "runtime instructions inside typeof in comptime only scope" {
try expect(@TypeOf((T{}).b) == i8);
}
}
+
+test "@sizeOf optional of previously unresolved union" {
+ const Node = union { a: usize };
+ try expect(@sizeOf(?Node) == @sizeOf(Node) + @alignOf(Node));
+}
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 9842b908c6..76cadc8d84 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -28,7 +28,6 @@ comptime {
}
test "slicing" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var array: [20]i32 = undefined;
@@ -64,7 +63,6 @@ test "comptime slice of undefined pointer of length 0" {
}
test "implicitly cast array of size 0 to slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var msg = [_]u8{};
@@ -121,7 +119,6 @@ test "slice of type" {
}
test "generic malloc free" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -233,7 +230,6 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "C pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
@@ -288,7 +284,6 @@ test "slice type with custom alignment" {
}
test "obtaining a null terminated slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// here we have a normal array
@@ -590,7 +585,6 @@ test "array mult of slice gives ptr to array" {
}
test "slice bounds in comptime concatenation" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const bs = comptime blk: {
@@ -712,3 +706,44 @@ test "global slice field access" {
S.slice.len -= 2;
try expectEqualStrings("trin", S.slice);
}
+
+test "slice of void" {
+ var n: usize = 10;
+ var arr: [12]void = undefined;
+ const slice = @as([]void, &arr)[0..n];
+ try expect(slice.len == n);
+}
+
+test "slice with dereferenced value" {
+ var a: usize = 0;
+ var idx: *usize = &a;
+ _ = blk: {
+ var array = [_]u8{};
+ break :blk array[idx.*..];
+ };
+ const res = blk: {
+ var array = [_]u8{};
+ break :blk array[idx.*..];
+ };
+ try expect(res.len == 0);
+}
+
+test "empty slice ptr is non null" {
+ if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64 and (builtin.os.tag == .macos or builtin.os.tag == .windows)) return error.SkipZigTest; // TODO
+
+ const empty_slice: []u8 = &[_]u8{};
+ const p: [*]u8 = empty_slice.ptr + 0;
+ const t = @ptrCast([*]i8, p);
+ try expect(@ptrToInt(t) == @ptrToInt(empty_slice.ptr));
+}
+
+test "slice decays to many pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ var buf: [8]u8 = "abcdefg\x00".*;
+ const p: [*:0]const u8 = buf[0..7 :0];
+ try expectEqualStrings(buf[0..7], std.mem.span(p));
+}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index db7092ab82..20a09f92cc 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -106,7 +106,6 @@ fn testMutation(foo: *StructFoo) void {
}
test "struct byval assign" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var foo1: StructFoo = undefined;
@@ -140,7 +139,6 @@ fn returnEmptyStructInstance() StructWithNoFields {
}
test "fn call of struct field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Foo = struct {
@@ -323,7 +321,6 @@ const VoidStructFieldsFoo = struct {
};
test "return empty struct from fn" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
_ = testReturnEmptyStructFromFn();
@@ -334,7 +331,6 @@ fn testReturnEmptyStructFromFn() EmptyStruct2 {
}
test "pass slice of empty struct to fn" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(testPassSliceOfEmptyStructToFn(&[_]EmptyStruct2{EmptyStruct2{}}) == 1);
@@ -369,7 +365,6 @@ const EmptyStruct = struct {
};
test "align 1 field before self referential align 8 field as slice return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = alloc(Expr);
@@ -1094,7 +1089,6 @@ test "packed struct with undefined initializers" {
test "for loop over pointers to struct, getting field from struct pointer" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1236,7 +1230,6 @@ test "initialize struct with empty literal" {
}
test "loading a struct pointer perfoms a copy" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1359,23 +1352,53 @@ test "under-aligned struct field" {
try expect(result == 1234);
}
-test "address of zero-bit field is equal to address of only field" {
+test "fieldParentPtr of a zero-bit field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- {
- const A = struct { b: void = {}, u: u8 };
- var a = A{ .u = 0 };
- const a_ptr = @fieldParentPtr(A, "b", &a.b);
- try std.testing.expectEqual(&a, a_ptr);
- }
- {
- const A = struct { u: u8, b: void = {} };
- var a = A{ .u = 0 };
- const a_ptr = @fieldParentPtr(A, "b", &a.b);
- try std.testing.expectEqual(&a, a_ptr);
- }
+ const S = struct {
+ fn testStruct(comptime A: type) !void {
+ {
+ const a = A{ .u = 0 };
+ const b_ptr = &a.b;
+ const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ try std.testing.expectEqual(&a, a_ptr);
+ }
+ {
+ var a = A{ .u = 0 };
+ const b_ptr = &a.b;
+ const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ try std.testing.expectEqual(&a, a_ptr);
+ }
+ }
+ fn testNestedStruct(comptime A: type) !void {
+ {
+ const a = A{ .u = 0 };
+ const c_ptr = &a.b.c;
+ const b_ptr = @fieldParentPtr(@TypeOf(a.b), "c", c_ptr);
+ try std.testing.expectEqual(&a.b, b_ptr);
+ const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ try std.testing.expectEqual(&a, a_ptr);
+ }
+ {
+ var a = A{ .u = 0 };
+ const c_ptr = &a.b.c;
+ const b_ptr = @fieldParentPtr(@TypeOf(a.b), "c", c_ptr);
+ try std.testing.expectEqual(&a.b, b_ptr);
+ const a_ptr = @fieldParentPtr(A, "b", b_ptr);
+ try std.testing.expectEqual(&a, a_ptr);
+ }
+ }
+ fn doTheTest() !void {
+ try testStruct(struct { b: void = {}, u: u8 });
+ try testStruct(struct { u: u8, b: void = {} });
+ try testNestedStruct(struct { b: struct { c: void = {} } = .{}, u: u8 });
+ try testNestedStruct(struct { u: u8, b: struct { c: void = {} } = .{} });
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
}
test "struct field has a pointer to an aligned version of itself" {
@@ -1389,3 +1412,146 @@ test "struct field has a pointer to an aligned version of itself" {
try expect(&e == e.next);
}
+
+test "struct has only one reference" {
+ const S = struct {
+ fn optionalStructParam(_: ?struct { x: u8 }) void {}
+ fn errorUnionStructParam(_: error{}!struct { x: u8 }) void {}
+ fn optionalStructReturn() ?struct { x: u8 } {
+ return null;
+ }
+ fn errorUnionStructReturn() error{Foo}!struct { x: u8 } {
+ return error.Foo;
+ }
+
+ fn pointerPackedStruct(_: *packed struct { x: u8 }) void {}
+ fn nestedPointerPackedStruct(_: struct { x: *packed struct { x: u8 } }) void {}
+ fn pointerNestedPackedStruct(_: *struct { x: packed struct { x: u8 } }) void {}
+ fn pointerNestedPointerPackedStruct(_: *struct { x: *packed struct { x: u8 } }) void {}
+
+ fn optionalComptimeIntParam(comptime x: ?comptime_int) comptime_int {
+ return x.?;
+ }
+ fn errorUnionComptimeIntParam(comptime x: error{}!comptime_int) comptime_int {
+ return x catch unreachable;
+ }
+ };
+
+ const optional_struct_param: *const anyopaque = &S.optionalStructParam;
+ const error_union_struct_param: *const anyopaque = &S.errorUnionStructParam;
+ try expect(optional_struct_param != error_union_struct_param);
+
+ const optional_struct_return: *const anyopaque = &S.optionalStructReturn;
+ const error_union_struct_return: *const anyopaque = &S.errorUnionStructReturn;
+ try expect(optional_struct_return != error_union_struct_return);
+
+ const pointer_packed_struct: *const anyopaque = &S.pointerPackedStruct;
+ const nested_pointer_packed_struct: *const anyopaque = &S.nestedPointerPackedStruct;
+ try expect(pointer_packed_struct != nested_pointer_packed_struct);
+
+ const pointer_nested_packed_struct: *const anyopaque = &S.pointerNestedPackedStruct;
+ const pointer_nested_pointer_packed_struct: *const anyopaque = &S.pointerNestedPointerPackedStruct;
+ try expect(pointer_nested_packed_struct != pointer_nested_pointer_packed_struct);
+
+ try expectEqual(@alignOf(struct {}), S.optionalComptimeIntParam(@alignOf(struct {})));
+ try expectEqual(@alignOf(struct { x: u8 }), S.errorUnionComptimeIntParam(@alignOf(struct { x: u8 })));
+ try expectEqual(@sizeOf(struct { x: u16 }), S.optionalComptimeIntParam(@sizeOf(struct { x: u16 })));
+ try expectEqual(@sizeOf(struct { x: u32 }), S.errorUnionComptimeIntParam(@sizeOf(struct { x: u32 })));
+}
+
+test "no dependency loop on pointer to optional struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const A = struct { b: B };
+ const B = struct { a: *?A };
+ };
+ var a1: ?S.A = null;
+ var a2: ?S.A = .{ .b = .{ .a = &a1 } };
+ a1 = .{ .b = .{ .a = &a2 } };
+
+ try expect(a1.?.b.a == &a2);
+ try expect(a2.?.b.a == &a1);
+}
+
+test "discarded struct initialization works as expected" {
+ const S = struct { a: u32 };
+ _ = S{ .a = 1 };
+}
+
+test "function pointer in struct returns the struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const A = struct {
+ const A = @This();
+ f: *const fn () A,
+
+ fn f() A {
+ return .{ .f = f };
+ }
+ };
+ var a = A.f();
+ try expect(a.f == A.f);
+}
+
+test "no dependency loop on optional field wrapped in generic function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
+ const S = struct {
+ fn Atomic(comptime T: type) type {
+ return T;
+ }
+ const A = struct { b: Atomic(?*B) };
+ const B = struct { a: ?*A };
+ };
+ var a: S.A = .{ .b = null };
+ var b: S.B = .{ .a = &a };
+ a.b = &b;
+
+ try expect(a.b == &b);
+ try expect(b.a == &a);
+}
+
+test "optional field init with tuple" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ a: ?struct { b: u32 },
+ };
+ var a: u32 = 0;
+ var b = S{
+ .a = .{ .b = a },
+ };
+ try expect(b.a.?.b == a);
+}
+
+test "if inside struct init inside if" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const MyStruct = struct { x: u32 };
+ const b: u32 = 5;
+ var i: u32 = 1;
+ var my_var = if (i < 5)
+ MyStruct{
+ .x = 1 + if (i > 0) b else 0,
+ }
+ else
+ MyStruct{
+ .x = 1 + if (i > 0) b else 0,
+ };
+ try expect(my_var.x == 6);
+}
+
+test "optional generic function label struct field" {
+ const Options = struct {
+ isFoo: ?fn (type) u8 = defaultIsFoo,
+ fn defaultIsFoo(comptime _: type) u8 {
+ return 123;
+ }
+ };
+ try expect((Options{}).isFoo.?(u8) == 123);
+}
diff --git a/test/behavior/struct_contains_slice_of_itself.zig b/test/behavior/struct_contains_slice_of_itself.zig
index 507c9ecd9d..adb1c31047 100644
--- a/test/behavior/struct_contains_slice_of_itself.zig
+++ b/test/behavior/struct_contains_slice_of_itself.zig
@@ -12,7 +12,6 @@ const NodeAligned = struct {
};
test "struct contains slice of itself" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var other_nodes = [_]Node{
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index a183dc7c3b..3bb9c35a4e 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -390,7 +390,6 @@ fn switchWithUnreachable(x: i32) i32 {
}
test "capture value of switch with all unreachable prongs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const x = return_a_number() catch |err| switch (err) {
@@ -494,7 +493,6 @@ test "switch prongs with error set cases make a new error set type for capture v
}
test "return result loc and then switch with range implicit casted to error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig
index d975002f8a..8143b1bddd 100644
--- a/test/behavior/translate_c_macros.zig
+++ b/test/behavior/translate_c_macros.zig
@@ -221,3 +221,7 @@ test "Macro that uses remainder operator. Issue #13346" {
),
);
}
+
+test "@typeInfo on @cImport result" {
+ try expect(@typeInfo(h).Struct.decls.len > 1);
+}
diff --git a/test/behavior/try.zig b/test/behavior/try.zig
index 3ad192095d..36428a4616 100644
--- a/test/behavior/try.zig
+++ b/test/behavior/try.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "try on error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try tryOnErrorUnionImpl();
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 9738d4e75b..f1c15fb3e3 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -2,6 +2,8 @@ const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
+const expectEqualStrings = std.testing.expectEqualStrings;
+const expectEqual = std.testing.expectEqual;
test "tuple concatenation" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -136,14 +138,14 @@ test "array-like initializer for tuple types" {
.fields = &.{
.{
.name = "0",
- .field_type = i32,
+ .type = i32,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(i32),
},
.{
.name = "1",
- .field_type = u8,
+ .type = u8,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(i32),
@@ -232,31 +234,31 @@ test "fieldParentPtr of anon struct" {
test "offsetOf tuple" {
var x: u32 = 0;
const T = @TypeOf(.{ x, x });
- _ = @offsetOf(T, "1");
+ try expect(@offsetOf(T, "1") == @sizeOf(u32));
}
test "offsetOf anon struct" {
var x: u32 = 0;
const T = @TypeOf(.{ .foo = x, .bar = x });
- _ = @offsetOf(T, "bar");
+ try expect(@offsetOf(T, "bar") == @sizeOf(u32));
}
test "initializing tuple with mixed comptime-runtime fields" {
- if (true) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var x: u32 = 15;
const T = @TypeOf(.{ @as(i32, -1234), @as(u32, 5678), x });
var a: T = .{ -1234, 5678, x + 1 };
- _ = a;
+ try expect(a[2] == 16);
}
test "initializing anon struct with mixed comptime-runtime fields" {
- if (true) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var x: u32 = 15;
const T = @TypeOf(.{ .foo = @as(i32, -1234), .bar = x });
var a: T = .{ .foo = -1234, .bar = x + 1 };
- _ = a;
+ try expect(a.bar == 16);
}
test "tuple in tuple passed to generic function" {
@@ -314,7 +316,7 @@ test "zero sized struct in tuple handled correctly" {
.decls = &.{},
.fields = &.{.{
.name = "0",
- .field_type = struct {},
+ .type = struct {},
.default_value = null,
.is_comptime = false,
.alignment = 0,
@@ -340,3 +342,58 @@ test "tuple type with void field and a runtime field" {
var t: T = .{ 5, {} };
try expect(t[0] == 5);
}
+
+test "branching inside tuple literal" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn foo(a: anytype) !void {
+ try expect(a[0] == 1234);
+ }
+ };
+ var a = false;
+ try S.foo(.{if (a) @as(u32, 5678) else @as(u32, 1234)});
+}
+
+test "tuple initialized with a runtime known value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ const E = union(enum) { e: []const u8 };
+ const W = union(enum) { w: E };
+ var e = E{ .e = "test" };
+ const w = .{W{ .w = e }};
+ try expectEqualStrings(w[0].w.e, "test");
+}
+
+test "tuple of struct concatenation and coercion to array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const StructWithDefault = struct { value: f32 = 42 };
+ const SomeStruct = struct { array: [4]StructWithDefault };
+
+ const value1 = SomeStruct{ .array = .{StructWithDefault{}} ++ [_]StructWithDefault{.{}} ** 3 };
+ const value2 = SomeStruct{ .array = .{.{}} ++ [_]StructWithDefault{.{}} ** 3 };
+
+ try expectEqual(value1, value2);
+}
+
+test "nested runtime conditionals in tuple initializer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ var data: u8 = 0;
+ const x = .{
+ if (data != 0) "" else switch (@truncate(u1, data)) {
+ 0 => "up",
+ 1 => "down",
+ },
+ };
+ try expectEqualStrings("up", x[0]);
+}
diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig
index 7beab1ca8f..87d4997c8b 100644
--- a/test/behavior/tuple_declarations.zig
+++ b/test/behavior/tuple_declarations.zig
@@ -20,13 +20,13 @@ test "tuple declaration type info" {
try expect(info.is_tuple);
try expectEqualStrings(info.fields[0].name, "0");
- try expect(info.fields[0].field_type == u32);
+ try expect(info.fields[0].type == u32);
try expect(@ptrCast(*const u32, @alignCast(@alignOf(u32), info.fields[0].default_value)).* == 1);
try expect(info.fields[0].is_comptime);
try expect(info.fields[0].alignment == 2);
try expectEqualStrings(info.fields[1].name, "1");
- try expect(info.fields[1].field_type == []const u8);
+ try expect(info.fields[1].type == []const u8);
try expect(info.fields[1].default_value == null);
try expect(!info.fields[1].is_comptime);
try expect(info.fields[1].alignment == @alignOf([]const u8));
@@ -44,13 +44,13 @@ test "tuple declaration type info" {
try expect(info.is_tuple);
try expectEqualStrings(info.fields[0].name, "0");
- try expect(info.fields[0].field_type == u1);
+ try expect(info.fields[0].type == u1);
try expectEqualStrings(info.fields[1].name, "1");
- try expect(info.fields[1].field_type == u30);
+ try expect(info.fields[1].type == u30);
try expectEqualStrings(info.fields[2].name, "2");
- try expect(info.fields[2].field_type == u1);
+ try expect(info.fields[2].type == u1);
}
}
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index b2db2a73b5..325bf0a8ed 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -268,10 +268,10 @@ test "Type.Struct" {
const infoA = @typeInfo(A).Struct;
try testing.expectEqual(Type.ContainerLayout.Auto, infoA.layout);
try testing.expectEqualSlices(u8, "x", infoA.fields[0].name);
- try testing.expectEqual(u8, infoA.fields[0].field_type);
+ try testing.expectEqual(u8, infoA.fields[0].type);
try testing.expectEqual(@as(?*const anyopaque, null), infoA.fields[0].default_value);
try testing.expectEqualSlices(u8, "y", infoA.fields[1].name);
- try testing.expectEqual(u32, infoA.fields[1].field_type);
+ try testing.expectEqual(u32, infoA.fields[1].type);
try testing.expectEqual(@as(?*const anyopaque, null), infoA.fields[1].default_value);
try testing.expectEqualSlices(Type.Declaration, &.{}, infoA.decls);
try testing.expectEqual(@as(bool, false), infoA.is_tuple);
@@ -286,10 +286,10 @@ test "Type.Struct" {
const infoB = @typeInfo(B).Struct;
try testing.expectEqual(Type.ContainerLayout.Extern, infoB.layout);
try testing.expectEqualSlices(u8, "x", infoB.fields[0].name);
- try testing.expectEqual(u8, infoB.fields[0].field_type);
+ try testing.expectEqual(u8, infoB.fields[0].type);
try testing.expectEqual(@as(?*const anyopaque, null), infoB.fields[0].default_value);
try testing.expectEqualSlices(u8, "y", infoB.fields[1].name);
- try testing.expectEqual(u32, infoB.fields[1].field_type);
+ try testing.expectEqual(u32, infoB.fields[1].type);
try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoB.fields[1].default_value.?).*);
try testing.expectEqual(@as(usize, 0), infoB.decls.len);
try testing.expectEqual(@as(bool, false), infoB.is_tuple);
@@ -298,10 +298,10 @@ test "Type.Struct" {
const infoC = @typeInfo(C).Struct;
try testing.expectEqual(Type.ContainerLayout.Packed, infoC.layout);
try testing.expectEqualSlices(u8, "x", infoC.fields[0].name);
- try testing.expectEqual(u8, infoC.fields[0].field_type);
+ try testing.expectEqual(u8, infoC.fields[0].type);
try testing.expectEqual(@as(u8, 3), @ptrCast(*const u8, infoC.fields[0].default_value.?).*);
try testing.expectEqualSlices(u8, "y", infoC.fields[1].name);
- try testing.expectEqual(u32, infoC.fields[1].field_type);
+ try testing.expectEqual(u32, infoC.fields[1].type);
try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoC.fields[1].default_value.?).*);
try testing.expectEqual(@as(usize, 0), infoC.decls.len);
try testing.expectEqual(@as(bool, false), infoC.is_tuple);
@@ -311,10 +311,10 @@ test "Type.Struct" {
const infoD = @typeInfo(D).Struct;
try testing.expectEqual(Type.ContainerLayout.Auto, infoD.layout);
try testing.expectEqualSlices(u8, "x", infoD.fields[0].name);
- try testing.expectEqual(comptime_int, infoD.fields[0].field_type);
+ try testing.expectEqual(comptime_int, infoD.fields[0].type);
try testing.expectEqual(@as(comptime_int, 3), @ptrCast(*const comptime_int, infoD.fields[0].default_value.?).*);
try testing.expectEqualSlices(u8, "y", infoD.fields[1].name);
- try testing.expectEqual(comptime_int, infoD.fields[1].field_type);
+ try testing.expectEqual(comptime_int, infoD.fields[1].type);
try testing.expectEqual(@as(comptime_int, 5), @ptrCast(*const comptime_int, infoD.fields[1].default_value.?).*);
try testing.expectEqual(@as(usize, 0), infoD.decls.len);
try testing.expectEqual(@as(bool, false), infoD.is_tuple);
@@ -324,10 +324,10 @@ test "Type.Struct" {
const infoE = @typeInfo(E).Struct;
try testing.expectEqual(Type.ContainerLayout.Auto, infoE.layout);
try testing.expectEqualSlices(u8, "0", infoE.fields[0].name);
- try testing.expectEqual(comptime_int, infoE.fields[0].field_type);
+ try testing.expectEqual(comptime_int, infoE.fields[0].type);
try testing.expectEqual(@as(comptime_int, 1), @ptrCast(*const comptime_int, infoE.fields[0].default_value.?).*);
try testing.expectEqualSlices(u8, "1", infoE.fields[1].name);
- try testing.expectEqual(comptime_int, infoE.fields[1].field_type);
+ try testing.expectEqual(comptime_int, infoE.fields[1].type);
try testing.expectEqual(@as(comptime_int, 2), @ptrCast(*const comptime_int, infoE.fields[1].default_value.?).*);
try testing.expectEqual(@as(usize, 0), infoE.decls.len);
try testing.expectEqual(@as(bool, true), infoE.is_tuple);
@@ -354,7 +354,6 @@ test "Type.Enum" {
const Foo = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u8,
.fields = &.{
.{ .name = "a", .value = 1 },
@@ -369,7 +368,6 @@ test "Type.Enum" {
try testing.expectEqual(@as(u8, 5), @enumToInt(Foo.b));
const Bar = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u32,
.fields = &.{
.{ .name = "a", .value = 1 },
@@ -396,8 +394,8 @@ test "Type.Union" {
.layout = .Extern,
.tag_type = null,
.fields = &.{
- .{ .name = "int", .field_type = i32, .alignment = @alignOf(f32) },
- .{ .name = "float", .field_type = f32, .alignment = @alignOf(f32) },
+ .{ .name = "int", .type = i32, .alignment = @alignOf(f32) },
+ .{ .name = "float", .type = f32, .alignment = @alignOf(f32) },
},
.decls = &.{},
},
@@ -412,8 +410,8 @@ test "Type.Union" {
.layout = .Packed,
.tag_type = null,
.fields = &.{
- .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
- .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
+ .{ .name = "signed", .type = i32, .alignment = @alignOf(i32) },
+ .{ .name = "unsigned", .type = u32, .alignment = @alignOf(u32) },
},
.decls = &.{},
},
@@ -424,7 +422,6 @@ test "Type.Union" {
const Tag = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u1,
.fields = &.{
.{ .name = "signed", .value = 0 },
@@ -439,8 +436,8 @@ test "Type.Union" {
.layout = .Auto,
.tag_type = Tag,
.fields = &.{
- .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
- .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
+ .{ .name = "signed", .type = i32, .alignment = @alignOf(i32) },
+ .{ .name = "unsigned", .type = u32, .alignment = @alignOf(u32) },
},
.decls = &.{},
},
@@ -456,7 +453,6 @@ test "Type.Union from Type.Enum" {
const Tag = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u0,
.fields = &.{
.{ .name = "working_as_expected", .value = 0 },
@@ -470,7 +466,7 @@ test "Type.Union from Type.Enum" {
.layout = .Auto,
.tag_type = Tag,
.fields = &.{
- .{ .name = "working_as_expected", .field_type = u32, .alignment = @alignOf(u32) },
+ .{ .name = "working_as_expected", .type = u32, .alignment = @alignOf(u32) },
},
.decls = &.{},
},
@@ -487,7 +483,7 @@ test "Type.Union from regular enum" {
.layout = .Auto,
.tag_type = E,
.fields = &.{
- .{ .name = "working_as_expected", .field_type = u32, .alignment = @alignOf(u32) },
+ .{ .name = "working_as_expected", .type = u32, .alignment = @alignOf(u32) },
},
.decls = &.{},
},
@@ -512,9 +508,9 @@ test "Type.Fn" {
.is_generic = false,
.is_var_args = false,
.return_type = void,
- .args = &.{
- .{ .is_generic = false, .is_noalias = false, .arg_type = c_int },
- .{ .is_generic = false, .is_noalias = false, .arg_type = some_ptr },
+ .params = &.{
+ .{ .is_generic = false, .is_noalias = false, .type = c_int },
+ .{ .is_generic = false, .is_noalias = false, .type = some_ptr },
},
} };
@@ -537,7 +533,7 @@ test "reified struct field name from optional payload" {
.layout = .Auto,
.fields = &.{.{
.name = name,
- .field_type = u8,
+ .type = u8,
.default_value = null,
.is_comptime = false,
.alignment = 1,
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index 2d9eec645a..0fb17519e6 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -238,7 +238,6 @@ fn testEnum() !void {
const os_info = @typeInfo(Os);
try expect(os_info == .Enum);
- try expect(os_info.Enum.layout == .Auto);
try expect(os_info.Enum.fields.len == 4);
try expect(mem.eql(u8, os_info.Enum.fields[1].name, "Macos"));
try expect(os_info.Enum.fields[3].value == 3);
@@ -257,7 +256,7 @@ fn testUnion() !void {
try expect(typeinfo_info.Union.layout == .Auto);
try expect(typeinfo_info.Union.tag_type.? == TypeId);
try expect(typeinfo_info.Union.fields.len == 24);
- try expect(typeinfo_info.Union.fields[4].field_type == @TypeOf(@typeInfo(u8).Int));
+ try expect(typeinfo_info.Union.fields[4].type == @TypeOf(@typeInfo(u8).Int));
try expect(typeinfo_info.Union.decls.len == 22);
const TestNoTagUnion = union {
@@ -271,7 +270,7 @@ fn testUnion() !void {
try expect(notag_union_info.Union.layout == .Auto);
try expect(notag_union_info.Union.fields.len == 2);
try expect(notag_union_info.Union.fields[0].alignment == @alignOf(void));
- try expect(notag_union_info.Union.fields[1].field_type == u32);
+ try expect(notag_union_info.Union.fields[1].type == u32);
try expect(notag_union_info.Union.fields[1].alignment == @alignOf(u32));
const TestExternUnion = extern union {
@@ -281,11 +280,10 @@ fn testUnion() !void {
const extern_union_info = @typeInfo(TestExternUnion);
try expect(extern_union_info.Union.layout == .Extern);
try expect(extern_union_info.Union.tag_type == null);
- try expect(extern_union_info.Union.fields[0].field_type == *anyopaque);
+ try expect(extern_union_info.Union.fields[0].type == *anyopaque);
}
test "type info: struct info" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testStruct();
@@ -319,7 +317,7 @@ fn testPackedStruct() !void {
try expect(struct_info.Struct.backing_integer == u128);
try expect(struct_info.Struct.fields.len == 4);
try expect(struct_info.Struct.fields[0].alignment == 0);
- try expect(struct_info.Struct.fields[2].field_type == f32);
+ try expect(struct_info.Struct.fields[2].type == f32);
try expect(struct_info.Struct.fields[2].default_value == null);
try expect(@ptrCast(*align(1) const u32, struct_info.Struct.fields[3].default_value.?).* == 4);
try expect(struct_info.Struct.fields[3].alignment == 0);
@@ -365,7 +363,7 @@ fn testFunction() !void {
try expect(fn_info.Fn.alignment > 0);
try expect(fn_info.Fn.calling_convention == .C);
try expect(!fn_info.Fn.is_generic);
- try expect(fn_info.Fn.args.len == 2);
+ try expect(fn_info.Fn.params.len == 2);
try expect(fn_info.Fn.is_var_args);
try expect(fn_info.Fn.return_type.? == usize);
const fn_aligned_info = @typeInfo(@TypeOf(typeInfoFooAligned));
@@ -377,31 +375,31 @@ extern fn typeInfoFooAligned(a: usize, b: bool, ...) align(4) callconv(.C) usize
test "type info: generic function types" {
const G1 = @typeInfo(@TypeOf(generic1));
- try expect(G1.Fn.args.len == 1);
- try expect(G1.Fn.args[0].is_generic == true);
- try expect(G1.Fn.args[0].arg_type == null);
+ try expect(G1.Fn.params.len == 1);
+ try expect(G1.Fn.params[0].is_generic == true);
+ try expect(G1.Fn.params[0].type == null);
try expect(G1.Fn.return_type == void);
const G2 = @typeInfo(@TypeOf(generic2));
- try expect(G2.Fn.args.len == 3);
- try expect(G2.Fn.args[0].is_generic == false);
- try expect(G2.Fn.args[0].arg_type == type);
- try expect(G2.Fn.args[1].is_generic == true);
- try expect(G2.Fn.args[1].arg_type == null);
- try expect(G2.Fn.args[2].is_generic == false);
- try expect(G2.Fn.args[2].arg_type == u8);
+ try expect(G2.Fn.params.len == 3);
+ try expect(G2.Fn.params[0].is_generic == false);
+ try expect(G2.Fn.params[0].type == type);
+ try expect(G2.Fn.params[1].is_generic == true);
+ try expect(G2.Fn.params[1].type == null);
+ try expect(G2.Fn.params[2].is_generic == false);
+ try expect(G2.Fn.params[2].type == u8);
try expect(G2.Fn.return_type == void);
const G3 = @typeInfo(@TypeOf(generic3));
- try expect(G3.Fn.args.len == 1);
- try expect(G3.Fn.args[0].is_generic == true);
- try expect(G3.Fn.args[0].arg_type == null);
+ try expect(G3.Fn.params.len == 1);
+ try expect(G3.Fn.params[0].is_generic == true);
+ try expect(G3.Fn.params[0].type == null);
try expect(G3.Fn.return_type == null);
const G4 = @typeInfo(@TypeOf(generic4));
- try expect(G4.Fn.args.len == 1);
- try expect(G4.Fn.args[0].is_generic == true);
- try expect(G4.Fn.args[0].arg_type == null);
+ try expect(G4.Fn.params.len == 1);
+ try expect(G4.Fn.params[0].is_generic == true);
+ try expect(G4.Fn.params[0].type == null);
try expect(G4.Fn.return_type == null);
}
@@ -512,7 +510,6 @@ test "type info for async frames" {
test "Declarations are returned in declaration order" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -536,7 +533,6 @@ test "Struct.is_tuple for anon list literal" {
test "Struct.is_tuple for anon struct literal" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const info = @typeInfo(@TypeOf(.{ .a = 0 }));
diff --git a/test/behavior/undefined.zig b/test/behavior/undefined.zig
index d93c7ea8a7..79838e02a6 100644
--- a/test/behavior/undefined.zig
+++ b/test/behavior/undefined.zig
@@ -16,7 +16,6 @@ test "init static array to undefined" {
// This test causes `initStaticArray()` to be codegen'd, and the
// C backend does not yet support returning arrays, so it fails
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(static_array[0] == 1);
@@ -79,7 +78,6 @@ test "assign undefined to struct with method" {
}
test "type name of undefined" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const x = undefined;
diff --git a/test/behavior/underscore.zig b/test/behavior/underscore.zig
index 0ed6c395bb..66b49e52d5 100644
--- a/test/behavior/underscore.zig
+++ b/test/behavior/underscore.zig
@@ -7,7 +7,6 @@ test "ignore lval with underscore" {
}
test "ignore lval with underscore (while loop)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
while (optionalReturnError()) |_| {
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index b6ec305eac..87691cf3cb 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -413,7 +413,6 @@ test "tagged union with no payloads" {
}
test "union with only 1 field casted to its enum type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Literal = union(enum) {
@@ -454,7 +453,7 @@ test "global union with single field is correctly initialized" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
glbl = Foo1{
- .f = @typeInfo(Foo1).Union.fields[0].field_type{ .x = 123 },
+ .f = @typeInfo(Foo1).Union.fields[0].type{ .x = 123 },
};
try expect(glbl.f.x == 123);
}
@@ -563,7 +562,6 @@ const Baz = enum { A, B, C, D };
test "tagged union type" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const foo1 = TaggedFoo{ .One = 13 };
@@ -697,7 +695,6 @@ const PartialInstWithPayload = union(enum) {
};
test "union with only 1 field casted to its enum type which has enum value specified" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Literal = union(enum) {
@@ -1474,3 +1471,36 @@ test "union int tag type is properly managed" {
};
try expect(@sizeOf(Bar) + 1 == 3);
}
+
+test "no dependency loop when function pointer in union returns the union" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const U = union(enum) {
+ const U = @This();
+ a: u8,
+ b: *const fn (x: U) void,
+ c: *const fn (x: U) U,
+ d: *const fn (x: u8) U,
+ e: *const fn (x: *U) void,
+ f: *const fn (x: *U) U,
+ fn foo(x: u8) U {
+ return .{ .a = x };
+ }
+ };
+ var b: U = .{ .d = U.foo };
+ try expect(b.d(2).a == 2);
+}
+
+test "union reassignment can use previous value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const U = union {
+ a: u32,
+ b: u32,
+ };
+ var a = U{ .a = 32 };
+ a = U{ .b = a.a };
+ try expect(a.b == 32);
+}
diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig
index 30912ee4fd..97f90b559d 100644
--- a/test/behavior/var_args.zig
+++ b/test/behavior/var_args.zig
@@ -1,5 +1,6 @@
const builtin = @import("builtin");
-const expect = @import("std").testing.expect;
+const std = @import("std");
+const expect = std.testing.expect;
fn add(args: anytype) i32 {
var sum = @as(i32, 0);
@@ -91,3 +92,118 @@ test "pass zero length array to var args param" {
fn doNothingWithFirstArg(args: anytype) void {
_ = args[0];
}
+
+test "simple variadic function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) {
+ // https://github.com/ziglang/zig/issues/14096
+ return error.SkipZigTest;
+ }
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn simple(...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ return @cVaArg(&ap, c_int);
+ }
+
+ fn add(count: c_int, ...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ var i: usize = 0;
+ var sum: c_int = 0;
+ while (i < count) : (i += 1) {
+ sum += @cVaArg(&ap, c_int);
+ }
+ return sum;
+ }
+ };
+
+ try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0)));
+ try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024)));
+ try std.testing.expectEqual(@as(c_int, 0), S.add(0));
+ try std.testing.expectEqual(@as(c_int, 1), S.add(1, @as(c_int, 1)));
+ try std.testing.expectEqual(@as(c_int, 3), S.add(2, @as(c_int, 1), @as(c_int, 2)));
+}
+
+test "variadic functions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) {
+ // https://github.com/ziglang/zig/issues/14096
+ return error.SkipZigTest;
+ }
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn printf(list_ptr: *std.ArrayList(u8), format: [*:0]const u8, ...) callconv(.C) void {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ vprintf(list_ptr, format, &ap);
+ }
+
+ fn vprintf(
+ list: *std.ArrayList(u8),
+ format: [*:0]const u8,
+ ap: *std.builtin.VaList,
+ ) callconv(.C) void {
+ for (std.mem.span(format)) |c| switch (c) {
+ 's' => {
+ const arg = @cVaArg(ap, [*:0]const u8);
+ list.writer().print("{s}", .{arg}) catch return;
+ },
+ 'd' => {
+ const arg = @cVaArg(ap, c_int);
+ list.writer().print("{d}", .{arg}) catch return;
+ },
+ else => unreachable,
+ };
+ }
+ };
+
+ var list = std.ArrayList(u8).init(std.testing.allocator);
+ defer list.deinit();
+ S.printf(&list, "dsd", @as(c_int, 1), @as([*:0]const u8, "hello"), @as(c_int, 5));
+ try std.testing.expectEqualStrings("1hello5", list.items);
+}
+
+test "copy VaList" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) {
+ // https://github.com/ziglang/zig/issues/14096
+ return error.SkipZigTest;
+ }
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn add(count: c_int, ...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ var copy = @cVaCopy(&ap);
+ defer @cVaEnd(&copy);
+ var i: usize = 0;
+ var sum: c_int = 0;
+ while (i < count) : (i += 1) {
+ sum += @cVaArg(&ap, c_int);
+ sum += @cVaArg(&copy, c_int) * 2;
+ }
+ return sum;
+ }
+ };
+
+ try std.testing.expectEqual(@as(c_int, 0), S.add(0));
+ try std.testing.expectEqual(@as(c_int, 3), S.add(1, @as(c_int, 1)));
+ try std.testing.expectEqual(@as(c_int, 9), S.add(2, @as(c_int, 1), @as(c_int, 2)));
+}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 9bcc5f9b9e..ba3d98ee56 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -181,6 +181,12 @@ test "tuple to vector" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // Regressed with LLVM 14:
+ // https://github.com/ziglang/zig/issues/12012
+ return error.SkipZigTest;
+ }
+
const S = struct {
fn doTheTest() !void {
const Vec3 = @Vector(3, i32);
@@ -963,35 +969,31 @@ test "@addWithOverflow" {
const S = struct {
fn doTheTest() !void {
{
- var result: @Vector(4, u8) = undefined;
var lhs = @Vector(4, u8){ 250, 250, 250, 250 };
var rhs = @Vector(4, u8){ 0, 5, 6, 10 };
- var overflow = @addWithOverflow(@Vector(4, u8), lhs, rhs, &result);
- var expected: @Vector(4, bool) = .{ false, false, true, true };
+ var overflow = @addWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(4, u1) = .{ 0, 0, 1, 1 };
try expectEqual(expected, overflow);
}
{
- var result: @Vector(4, i8) = undefined;
var lhs = @Vector(4, i8){ -125, -125, 125, 125 };
var rhs = @Vector(4, i8){ -3, -4, 2, 3 };
- var overflow = @addWithOverflow(@Vector(4, i8), lhs, rhs, &result);
- var expected: @Vector(4, bool) = .{ false, true, false, true };
+ var overflow = @addWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(4, u1) = .{ 0, 1, 0, 1 };
try expectEqual(expected, overflow);
}
{
- var result: @Vector(4, u1) = undefined;
var lhs = @Vector(4, u1){ 0, 0, 1, 1 };
var rhs = @Vector(4, u1){ 0, 1, 0, 1 };
- var overflow = @addWithOverflow(@Vector(4, u1), lhs, rhs, &result);
- var expected: @Vector(4, bool) = .{ false, false, false, true };
+ var overflow = @addWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(4, u1) = .{ 0, 0, 0, 1 };
try expectEqual(expected, overflow);
}
{
- var result: @Vector(4, u0) = undefined;
var lhs = @Vector(4, u0){ 0, 0, 0, 0 };
var rhs = @Vector(4, u0){ 0, 0, 0, 0 };
- var overflow = @addWithOverflow(@Vector(4, u0), lhs, rhs, &result);
- var expected: @Vector(4, bool) = .{ false, false, false, false };
+ var overflow = @addWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(4, u1) = .{ 0, 0, 0, 0 };
try expectEqual(expected, overflow);
}
}
@@ -1010,19 +1012,17 @@ test "@subWithOverflow" {
const S = struct {
fn doTheTest() !void {
{
- var result: @Vector(2, u8) = undefined;
var lhs = @Vector(2, u8){ 5, 5 };
var rhs = @Vector(2, u8){ 5, 6 };
- var overflow = @subWithOverflow(@Vector(2, u8), lhs, rhs, &result);
- var expected: @Vector(2, bool) = .{ false, true };
+ var overflow = @subWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(2, u1) = .{ 0, 1 };
try expectEqual(expected, overflow);
}
{
- var result: @Vector(4, i8) = undefined;
var lhs = @Vector(4, i8){ -120, -120, 120, 120 };
var rhs = @Vector(4, i8){ 8, 9, -7, -8 };
- var overflow = @subWithOverflow(@Vector(4, i8), lhs, rhs, &result);
- var expected: @Vector(4, bool) = .{ false, true, false, true };
+ var overflow = @subWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(4, u1) = .{ 0, 1, 0, 1 };
try expectEqual(expected, overflow);
}
}
@@ -1040,11 +1040,10 @@ test "@mulWithOverflow" {
const S = struct {
fn doTheTest() !void {
- var result: @Vector(4, u8) = undefined;
var lhs = @Vector(4, u8){ 10, 10, 10, 10 };
var rhs = @Vector(4, u8){ 25, 26, 0, 30 };
- var overflow = @mulWithOverflow(@Vector(4, u8), lhs, rhs, &result);
- var expected: @Vector(4, bool) = .{ false, true, false, true };
+ var overflow = @mulWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(4, u1) = .{ 0, 1, 0, 1 };
try expectEqual(expected, overflow);
}
};
@@ -1062,11 +1061,10 @@ test "@shlWithOverflow" {
const S = struct {
fn doTheTest() !void {
- var result: @Vector(4, u8) = undefined;
var lhs = @Vector(4, u8){ 0, 1, 8, 255 };
var rhs = @Vector(4, u3){ 7, 7, 7, 7 };
- var overflow = @shlWithOverflow(@Vector(4, u8), lhs, rhs, &result);
- var expected: @Vector(4, bool) = .{ false, false, true, true };
+ var overflow = @shlWithOverflow(lhs, rhs)[1];
+ var expected: @Vector(4, u1) = .{ 0, 0, 1, 1 };
try expectEqual(expected, overflow);
}
};
@@ -1136,8 +1134,19 @@ test "byte vector initialized in inline function" {
}
test "byte vector initialized in inline function" {
- // TODO https://github.com/ziglang/zig/issues/13279
- if (true) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+ if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64 and
+ builtin.cpu.features.isEnabled(@enumToInt(std.Target.x86.Feature.avx512f)))
+ {
+ // TODO https://github.com/ziglang/zig/issues/13279
+ return error.SkipZigTest;
+ }
const S = struct {
fn boolx4(e0: bool, e1: bool, e2: bool, e3: bool) @Vector(4, bool) {
@@ -1234,3 +1243,46 @@ test "array operands to shuffle are coerced to vectors" {
var b = @shuffle(u32, a, @splat(5, @as(u24, 0)), mask);
try expectEqual([_]u32{ 0, 3, 5, 7, 9 }, b);
}
+
+test "load packed vector element" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var x: @Vector(2, u15) = .{ 1, 4 };
+ try expect((&x[0]).* == 1);
+ try expect((&x[1]).* == 4);
+}
+
+test "store packed vector element" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var v = @Vector(4, u1){ 1, 1, 1, 1 };
+ try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v);
+ v[0] = 0;
+ try expectEqual(@Vector(4, u1){ 0, 1, 1, 1 }, v);
+}
+
+test "store to vector in slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var v = [_]@Vector(3, f32){
+ .{ 1, 1, 1 },
+ .{ 0, 0, 0 },
+ };
+ var s: []@Vector(3, f32) = &v;
+ var i: usize = 1;
+ s[i] = s[0];
+ try expectEqual(v[1], v[0]);
+}
diff --git a/test/behavior/void.zig b/test/behavior/void.zig
index 97c95c1f9a..85a9178145 100644
--- a/test/behavior/void.zig
+++ b/test/behavior/void.zig
@@ -19,7 +19,6 @@ test "compare void with void compile time known" {
}
test "iterate over a void slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var j: usize = 0;
diff --git a/test/behavior/while.zig b/test/behavior/while.zig
index 4ec2b8e8f8..6a97f96763 100644
--- a/test/behavior/while.zig
+++ b/test/behavior/while.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
const assert = std.debug.assert;
test "while loop" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var i: i32 = 0;
@@ -24,7 +23,6 @@ fn whileLoop2() i32 {
}
test "static eval while" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(static_eval_while_number == 1);
@@ -108,7 +106,6 @@ fn testBreakOuter() void {
test "while copies its payload" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -148,7 +145,6 @@ fn runContinueAndBreakTest() !void {
test "while with optional as condition" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
numbers_left = 10;
@@ -162,7 +158,6 @@ test "while with optional as condition" {
test "while with optional as condition with else" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
numbers_left = 10;
@@ -180,7 +175,6 @@ test "while with optional as condition with else" {
test "while with error union condition" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
numbers_left = 10;
@@ -263,7 +257,6 @@ fn returnWithImplicitCastFromWhileLoopTest() anyerror!void {
}
test "while on error union with else result follow else prong" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = while (returnError()) |value| {
@@ -273,7 +266,6 @@ test "while on error union with else result follow else prong" {
}
test "while on error union with else result follow break prong" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = while (returnSuccess(10)) |value| {
diff --git a/test/c_abi/cfuncs.c b/test/c_abi/cfuncs.c
index 20896669fe..16851f1c7e 100644
--- a/test/c_abi/cfuncs.c
+++ b/test/c_abi/cfuncs.c
@@ -742,6 +742,19 @@ SmallVec c_ret_small_vec(void) {
return (SmallVec){3, 4};
}
+typedef size_t MediumVec __attribute__((vector_size(4 * sizeof(size_t))));
+
+void c_medium_vec(MediumVec vec) {
+ assert_or_panic(vec[0] == 1);
+ assert_or_panic(vec[1] == 2);
+ assert_or_panic(vec[2] == 3);
+ assert_or_panic(vec[3] == 4);
+}
+
+MediumVec c_ret_medium_vec(void) {
+ return (MediumVec){5, 6, 7, 8};
+}
+
typedef size_t BigVec __attribute__((vector_size(8 * sizeof(size_t))));
void c_big_vec(BigVec vec) {
diff --git a/test/c_abi/main.zig b/test/c_abi/main.zig
index 47f0455744..dcf4cbe46f 100644
--- a/test/c_abi/main.zig
+++ b/test/c_abi/main.zig
@@ -801,6 +801,23 @@ test "small simd vector" {
try expect(x[1] == 4);
}
+const MediumVec = @Vector(4, usize);
+
+extern fn c_medium_vec(MediumVec) void;
+extern fn c_ret_medium_vec() MediumVec;
+
+test "medium simd vector" {
+ if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
+
+ c_medium_vec(.{ 1, 2, 3, 4 });
+
+ var x = c_ret_medium_vec();
+ try expect(x[0] == 5);
+ try expect(x[1] == 6);
+ try expect(x[2] == 7);
+ try expect(x[3] == 8);
+}
+
const BigVec = @Vector(8, usize);
extern fn c_big_vec(BigVec) void;
diff --git a/test/cases/compile_errors/cImport_with_bogus_include.zig b/test/cases/compile_errors/cImport_with_bogus_include.zig
deleted file mode 100644
index e1283b11a8..0000000000
--- a/test/cases/compile_errors/cImport_with_bogus_include.zig
+++ /dev/null
@@ -1,9 +0,0 @@
-const c = @cImport(@cInclude("bogus.h"));
-export fn entry() usize { return @sizeOf(@TypeOf(c.bogo)); }
-
-// error
-// backend=llvm
-// target=native
-//
-// :1:11: error: C import failed
-// :1:10: error: 'bogus.h' file not found
diff --git a/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig b/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig
index 7fd38280c6..f9df19802a 100644
--- a/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig
+++ b/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig
@@ -18,7 +18,7 @@ export fn entry() usize { return @sizeOf(@TypeOf(&foo)); }
// backend=stage2
// target=native
//
-// :8:16: error: expected type '*const u3', found '*align(0:3:1) const u3'
+// :8:16: error: expected type '*const u3', found '*align(1:3:1) const u3'
// :8:16: note: pointer host size '1' cannot cast into pointer host size '0'
// :8:16: note: pointer bit offset '3' cannot cast into pointer bit offset '0'
// :11:11: note: parameter type declared here
diff --git a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig
index b574df8833..aa52fb9756 100644
--- a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig
+++ b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig
@@ -53,6 +53,21 @@ export fn foo_slice() void {
_ = slice;
}
}
+export fn undefined_slice() void {
+ const arr: [100]u16 = undefined;
+ const slice = arr[0..12 :0];
+ _ = slice;
+}
+export fn string_slice() void {
+ const str = "abcdefg";
+ const slice = str[0..1 :12];
+ _ = slice;
+}
+export fn typeName_slice() void {
+ const arr = @typeName(usize);
+ const slice = arr[0..2 :0];
+ _ = slice;
+}
// error
// backend=stage2
@@ -72,3 +87,9 @@ export fn foo_slice() void {
// :44:29: note: expected '255', found '0'
// :52:29: error: value in memory does not match slice sentinel
// :52:29: note: expected '255', found '0'
+// :58:22: error: value in memory does not match slice sentinel
+// :58:22: note: expected '0', found 'undefined'
+// :63:22: error: value in memory does not match slice sentinel
+// :63:22: note: expected '12', found '98'
+// :68:22: error: value in memory does not match slice sentinel
+// :68:22: note: expected '0', found '105'
diff --git a/test/cases/compile_errors/comptime_store_in_comptime_switch_in_runtime_if.zig b/test/cases/compile_errors/comptime_store_in_comptime_switch_in_runtime_if.zig
index abaff066ad..7951b4dec0 100644
--- a/test/cases/compile_errors/comptime_store_in_comptime_switch_in_runtime_if.zig
+++ b/test/cases/compile_errors/comptime_store_in_comptime_switch_in_runtime_if.zig
@@ -9,7 +9,7 @@ pub export fn entry() void {
const info = @typeInfo(Widget).Union;
inline for (info.fields) |field| {
if (foo()) {
- switch (field.field_type) {
+ switch (field.type) {
u0 => a = 2,
else => unreachable,
}
diff --git a/test/cases/compile_errors/control_flow_uses_comptime_var_at_runtime.zig b/test/cases/compile_errors/control_flow_uses_comptime_var_at_runtime.zig
index 43ef20873c..a8058e8c75 100644
--- a/test/cases/compile_errors/control_flow_uses_comptime_var_at_runtime.zig
+++ b/test/cases/compile_errors/control_flow_uses_comptime_var_at_runtime.zig
@@ -6,6 +6,27 @@ export fn foo() void {
}
fn bar() void { }
+export fn baz() void {
+ comptime var idx: u32 = 0;
+ while (idx < 1) {
+ const not_null: ?u32 = 1;
+ _ = not_null orelse return;
+ idx += 1;
+ }
+}
+
+export fn qux() void {
+ comptime var i = 0;
+ while (i < 3) : (i += 1) {
+ const T = switch (i) {
+ 0 => f32,
+ 1 => i8,
+ 2 => bool,
+ else => unreachable,
+ };
+ _ = T;
+ }
+}
// error
// backend=stage2
@@ -13,3 +34,7 @@ fn bar() void { }
//
// :3:24: error: cannot store to comptime variable in non-inline loop
// :3:5: note: non-inline loop here
+// :14:13: error: cannot store to comptime variable in non-inline loop
+// :11:5: note: non-inline loop here
+// :20:24: error: cannot store to comptime variable in non-inline loop
+// :20:5: note: non-inline loop here
diff --git a/test/cases/compile_errors/dereference_anyopaque.zig b/test/cases/compile_errors/dereference_anyopaque.zig
index df58a11085..6dbbdfe1e2 100644
--- a/test/cases/compile_errors/dereference_anyopaque.zig
+++ b/test/cases/compile_errors/dereference_anyopaque.zig
@@ -16,7 +16,7 @@ fn parseFree(comptime T: type, value: T, allocator: std.mem.Allocator) void {
.Struct => |structInfo| {
inline for (structInfo.fields) |field| {
if (!field.is_comptime)
- parseFree(field.field_type, undefined, allocator);
+ parseFree(field.type, undefined, allocator);
}
},
.Pointer => |ptrInfo| {
diff --git a/test/cases/compile_errors/error_in_comptime_call_in_container_level_initializer.zig b/test/cases/compile_errors/error_in_comptime_call_in_container_level_initializer.zig
new file mode 100644
index 0000000000..2b61f45a31
--- /dev/null
+++ b/test/cases/compile_errors/error_in_comptime_call_in_container_level_initializer.zig
@@ -0,0 +1,22 @@
+const std = @import("std");
+const Version = std.SemanticVersion;
+const print = @import("std").debug.print;
+
+fn readVersion() Version {
+ const version_file = "foo";
+ const len = std.mem.indexOfAny(u8, version_file, " \n") orelse version_file.len;
+ const version_string = version_file[0..len];
+ return Version.parse(version_string) catch unreachable;
+}
+
+const version: Version = readVersion();
+pub export fn entry() void {
+ print("Version {}.{}.{}+{?s}\n", .{ version.major, version.minor, version.patch, version.build });
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :9:48: error: caught unexpected error 'InvalidVersion'
+// :12:37: note: called from here
diff --git a/test/cases/compile_errors/generic_instantiation_failure.zig b/test/cases/compile_errors/generic_instantiation_failure.zig
new file mode 100644
index 0000000000..42e4c4e8c8
--- /dev/null
+++ b/test/cases/compile_errors/generic_instantiation_failure.zig
@@ -0,0 +1,27 @@
+fn List(comptime Head: type, comptime Tail: type) type {
+ return union {
+ const Self = @This();
+ head: Head,
+ tail: Tail,
+
+ fn AppendReturnType(comptime item: anytype) type {
+ return List(Head, List(@TypeOf(item), void));
+ }
+ };
+}
+
+fn makeList(item: anytype) List(@TypeOf(item), void) {
+ return List(@TypeOf(item), void){ .head = item };
+}
+
+pub export fn entry() void {
+ @TypeOf(makeList(42)).AppendReturnType(64);
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :18:43: error: value of type 'type' ignored
+// :18:43: note: all non-void values must be used
+// :18:43: note: this error can be suppressed by assigning the value to '_'
diff --git a/test/cases/compile_errors/generic_instantiation_failure_in_generic_function_return_type.zig b/test/cases/compile_errors/generic_instantiation_failure_in_generic_function_return_type.zig
new file mode 100644
index 0000000000..3146c38604
--- /dev/null
+++ b/test/cases/compile_errors/generic_instantiation_failure_in_generic_function_return_type.zig
@@ -0,0 +1,14 @@
+const std = @import("std");
+
+pub export fn entry() void {
+ var ohnoes: *usize = undefined;
+ _ = sliceAsBytes(ohnoes);
+}
+fn sliceAsBytes(slice: anytype) std.meta.trait.isPtrTo(.Array)(@TypeOf(slice)) {}
+
+
+// error
+// backend=llvm
+// target=native
+//
+// :7:63: error: expected type 'type', found 'bool'
diff --git a/test/cases/compile_errors/ignored_comptime_value.zig b/test/cases/compile_errors/ignored_comptime_value.zig
index 2f7c6edea3..e1162459e6 100644
--- a/test/cases/compile_errors/ignored_comptime_value.zig
+++ b/test/cases/compile_errors/ignored_comptime_value.zig
@@ -1,6 +1,18 @@
-export fn foo() void {
+export fn a() void {
comptime 1;
}
+export fn b() void {
+ comptime bar();
+}
+fn bar() u8 {
+ const u32_max = @import("std").math.maxInt(u32);
+
+ @setEvalBranchQuota(u32_max);
+ var x: u32 = 0;
+ while (x != u32_max) : (x +%= 1) {}
+
+ return 0;
+}
// error
// backend=stage2
@@ -9,3 +21,6 @@ export fn foo() void {
// :2:5: error: value of type 'comptime_int' ignored
// :2:5: note: all non-void values must be used
// :2:5: note: this error can be suppressed by assigning the value to '_'
+// :5:17: error: value of type 'u8' ignored
+// :5:17: note: all non-void values must be used
+// :5:17: note: this error can be suppressed by assigning the value to '_'
diff --git a/test/cases/compile_errors/implicit_cast_const_array_to_mutable_slice.zig b/test/cases/compile_errors/implicit_cast_const_array_to_mutable_slice.zig
index 2a80b9ffbc..6f67f72525 100644
--- a/test/cases/compile_errors/implicit_cast_const_array_to_mutable_slice.zig
+++ b/test/cases/compile_errors/implicit_cast_const_array_to_mutable_slice.zig
@@ -13,6 +13,11 @@ export fn entry2() void {
const many: [*]u8 = str;
_ = many;
}
+export fn entry3() void {
+ const lang: []const u8 = "lang";
+ const targets: [1][]const u8 = [_][]u8{lang};
+ _ = targets;
+}
// error
// backend=stage2
@@ -24,3 +29,5 @@ export fn entry2() void {
// :8:27: note: cast discards const qualifier
// :13:25: error: expected type '[*]u8', found '*const [0:0]u8'
// :13:25: note: cast discards const qualifier
+// :18:44: error: expected type '[]u8', found '[]const u8'
+// :18:44: note: cast discards const qualifier
diff --git a/test/cases/compile_errors/incompatible sub-byte fields.zig b/test/cases/compile_errors/incompatible sub-byte fields.zig
index 20fbf5e30c..d765b7cf45 100644
--- a/test/cases/compile_errors/incompatible sub-byte fields.zig
+++ b/test/cases/compile_errors/incompatible sub-byte fields.zig
@@ -24,4 +24,4 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :14:17: error: incompatible types: '*align(0:0:1) u2' and '*align(2:8:2) u2'
+// :14:17: error: incompatible types: '*align(1:0:1) u2' and '*align(2:8:2) u2'
diff --git a/test/cases/compile_errors/intToEnum_on_non-exhaustive_enums_checks_int_in_range.zig b/test/cases/compile_errors/intToEnum_on_non-exhaustive_enums_checks_int_in_range.zig
new file mode 100644
index 0000000000..b05c9f35d9
--- /dev/null
+++ b/test/cases/compile_errors/intToEnum_on_non-exhaustive_enums_checks_int_in_range.zig
@@ -0,0 +1,11 @@
+pub export fn entry() void {
+ const E = enum(u3) { a, b, c, _ };
+ @compileLog(@intToEnum(E, 100));
+}
+
+// error
+// target=native
+// backend=stage2
+//
+// :3:17: error: int value '100' out of range of non-exhaustive enum 'tmp.entry.E'
+// :2:15: note: enum declared here
diff --git a/test/cases/compile_errors/invalid_capture_type.zig b/test/cases/compile_errors/invalid_capture_type.zig
new file mode 100644
index 0000000000..3813021c95
--- /dev/null
+++ b/test/cases/compile_errors/invalid_capture_type.zig
@@ -0,0 +1,24 @@
+export fn f1() void {
+ if (true) |x| { _ = x; }
+}
+export fn f2() void {
+ if (@as(usize, 5)) |_| {}
+}
+export fn f3() void {
+ if (@as(usize, 5)) |_| {} else |_| {}
+}
+export fn f4() void {
+ if (null) |_| {}
+}
+export fn f5() void {
+ if (error.Foo) |_| {} else |_| {}
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:9: error: expected optional type, found 'bool'
+// :5:9: error: expected optional type, found 'usize'
+// :8:9: error: expected error union type, found 'usize'
+// :14:9: error: expected error union type, found 'error{Foo}'
diff --git a/test/cases/compile_errors/invalid_store_to_comptime_field.zig b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
index a86cb500d2..89deda92d4 100644
--- a/test/cases/compile_errors/invalid_store_to_comptime_field.zig
+++ b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
@@ -61,6 +61,11 @@ pub export fn entry6() void {
};
_ = State.init(false);
}
+pub export fn entry7() void {
+ const list1 = .{ "sss", 1, 2, 3 };
+ const list2 = @TypeOf(list1){ .@"0" = "xxx", .@"1" = 4, .@"2" = 5, .@"3" = 6 };
+ _ = list2;
+}
// error
// target=native
@@ -73,4 +78,5 @@ pub export fn entry6() void {
// :25:29: note: default value set here
// :41:16: error: value stored in comptime field does not match the default value of the field
// :45:12: error: value stored in comptime field does not match the default value of the field
+// :66:43: error: value stored in comptime field does not match the default value of the field
// :59:35: error: value stored in comptime field does not match the default value of the field
diff --git a/test/cases/compile_errors/invalid_struct_field.zig b/test/cases/compile_errors/invalid_struct_field.zig
index d351a012f9..4450375cb8 100644
--- a/test/cases/compile_errors/invalid_struct_field.zig
+++ b/test/cases/compile_errors/invalid_struct_field.zig
@@ -1,15 +1,22 @@
-const A = struct { x : i32, };
+const A = struct { x: i32 };
export fn f() void {
- var a : A = undefined;
+ var a: A = undefined;
a.foo = 1;
const y = a.bar;
_ = y;
}
export fn g() void {
- var a : A = undefined;
+ var a: A = undefined;
const y = a.bar;
_ = y;
}
+export fn e() void {
+ const B = struct {
+ fn f() void {}
+ };
+ const b: B = undefined;
+ @import("std").debug.print("{}{}", .{ b.f, b.f });
+}
// error
// backend=stage2
@@ -18,4 +25,5 @@ export fn g() void {
// :4:7: error: no field named 'foo' in struct 'tmp.A'
// :1:11: note: struct declared here
// :10:17: error: no field named 'bar' in struct 'tmp.A'
-
+// :18:45: error: no field named 'f' in struct 'tmp.e.B'
+// :14:15: note: struct declared here
diff --git a/test/cases/compile_errors/invalid_variadic_function.zig b/test/cases/compile_errors/invalid_variadic_function.zig
new file mode 100644
index 0000000000..997db9fee8
--- /dev/null
+++ b/test/cases/compile_errors/invalid_variadic_function.zig
@@ -0,0 +1,12 @@
+fn foo(...) void {}
+fn bar(a: anytype, ...) callconv(a) void {}
+
+comptime { _ = foo; }
+comptime { _ = bar; }
+
+// error
+// backend=stage2
+// target=native
+//
+// :1:1: error: variadic function must have 'C' calling convention
+// :2:1: error: generic function cannot be variadic
diff --git a/test/cases/compile_errors/load_vector_pointer_with_unknown_runtime_index.zig b/test/cases/compile_errors/load_vector_pointer_with_unknown_runtime_index.zig
new file mode 100644
index 0000000000..d2182d8ad0
--- /dev/null
+++ b/test/cases/compile_errors/load_vector_pointer_with_unknown_runtime_index.zig
@@ -0,0 +1,17 @@
+export fn entry() void {
+ var v: @Vector(4, i31) = [_]i31{ 1, 5, 3, undefined };
+
+ var i: u32 = 0;
+ var x = loadv(&v[i]);
+ _ = x;
+}
+
+fn loadv(ptr: anytype) i31 {
+ return ptr.*;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :10:15: error: unable to determine vector element index of type '*align(16:0:4:?) i31'
diff --git a/test/cases/compile_errors/noalias_on_non_pointer_param.zig b/test/cases/compile_errors/noalias_on_non_pointer_param.zig
index f637013c93..806808820f 100644
--- a/test/cases/compile_errors/noalias_on_non_pointer_param.zig
+++ b/test/cases/compile_errors/noalias_on_non_pointer_param.zig
@@ -1,6 +1,12 @@
fn f(noalias x: i32) void { _ = x; }
export fn entry() void { f(1234); }
+fn generic(comptime T: type, noalias _: [*]T, noalias _: [*]const T, _: usize) void {}
+comptime { _ = generic; }
+
+fn slice(noalias _: []u8) void {}
+comptime { _ = slice; }
+
// error
// backend=stage2
// target=native
diff --git a/test/cases/compile_errors/packed_struct_field_alignment_unavailable_for_reify_type.zig b/test/cases/compile_errors/packed_struct_field_alignment_unavailable_for_reify_type.zig
index 8c17e9100b..9f0fb6773d 100644
--- a/test/cases/compile_errors/packed_struct_field_alignment_unavailable_for_reify_type.zig
+++ b/test/cases/compile_errors/packed_struct_field_alignment_unavailable_for_reify_type.zig
@@ -1,6 +1,6 @@
export fn entry() void {
_ = @Type(.{ .Struct = .{ .layout = .Packed, .fields = &.{
- .{ .name = "one", .field_type = u4, .default_value = null, .is_comptime = false, .alignment = 2 },
+ .{ .name = "one", .type = u4, .default_value = null, .is_comptime = false, .alignment = 2 },
}, .decls = &.{}, .is_tuple = false } });
}
diff --git a/test/cases/compile_errors/recursive_inline_fn.zig b/test/cases/compile_errors/recursive_inline_fn.zig
new file mode 100644
index 0000000000..2308bbdbc7
--- /dev/null
+++ b/test/cases/compile_errors/recursive_inline_fn.zig
@@ -0,0 +1,18 @@
+inline fn foo(x: i32) i32 {
+ if (x <= 0) {
+ return 0;
+ } else {
+ return x * 2 + foo(x - 1);
+ }
+}
+
+pub export fn entry() void {
+ var x: i32 = 4;
+ _ = foo(x) == 20;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :5:27: error: inline call is recursive
diff --git a/test/cases/compile_errors/reference_to_const_data.zig b/test/cases/compile_errors/reference_to_const_data.zig
index b7f2e93fbd..cbc0fe131c 100644
--- a/test/cases/compile_errors/reference_to_const_data.zig
+++ b/test/cases/compile_errors/reference_to_const_data.zig
@@ -18,6 +18,10 @@ export fn qux() void {
var ptr = &S{.x=1,.y=2};
ptr.x = 2;
}
+export fn quux() void {
+ var x = &@returnAddress();
+ x.* = 6;
+}
// error
// backend=stage2
@@ -27,3 +31,4 @@ export fn qux() void {
// :7:8: error: cannot assign to constant
// :11:8: error: cannot assign to constant
// :19:8: error: cannot assign to constant
+// :23:6: error: cannot assign to constant
diff --git a/test/cases/compile_errors/reified_enum_field_value_overflow.zig b/test/cases/compile_errors/reified_enum_field_value_overflow.zig
index ad8596ebcc..d9f0e2057f 100644
--- a/test/cases/compile_errors/reified_enum_field_value_overflow.zig
+++ b/test/cases/compile_errors/reified_enum_field_value_overflow.zig
@@ -1,6 +1,5 @@
comptime {
const E = @Type(.{ .Enum = .{
- .layout = .Auto,
.tag_type = u1,
.fields = &.{
.{ .name = "f0", .value = 0 },
diff --git a/test/cases/compile_errors/reify_enum_with_duplicate_field.zig b/test/cases/compile_errors/reify_enum_with_duplicate_field.zig
index f8cadd9185..a4779b65ef 100644
--- a/test/cases/compile_errors/reify_enum_with_duplicate_field.zig
+++ b/test/cases/compile_errors/reify_enum_with_duplicate_field.zig
@@ -1,7 +1,6 @@
export fn entry() void {
_ = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u32,
.fields = &.{
.{ .name = "A", .value = 0 },
diff --git a/test/cases/compile_errors/reify_enum_with_duplicate_tag_value.zig b/test/cases/compile_errors/reify_enum_with_duplicate_tag_value.zig
index c3211fe301..b9be7cdaed 100644
--- a/test/cases/compile_errors/reify_enum_with_duplicate_tag_value.zig
+++ b/test/cases/compile_errors/reify_enum_with_duplicate_tag_value.zig
@@ -1,7 +1,6 @@
export fn entry() void {
_ = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u32,
.fields = &.{
.{ .name = "A", .value = 10 },
diff --git a/test/cases/compile_errors/reify_struct.zig b/test/cases/compile_errors/reify_struct.zig
index 1c6001ced6..42b8e42af5 100644
--- a/test/cases/compile_errors/reify_struct.zig
+++ b/test/cases/compile_errors/reify_struct.zig
@@ -3,7 +3,7 @@ comptime {
.layout = .Auto,
.fields = &.{.{
.name = "foo",
- .field_type = u32,
+ .type = u32,
.default_value = null,
.is_comptime = false,
.alignment = 4,
@@ -17,7 +17,7 @@ comptime {
.layout = .Auto,
.fields = &.{.{
.name = "3",
- .field_type = u32,
+ .type = u32,
.default_value = null,
.is_comptime = false,
.alignment = 4,
@@ -31,7 +31,7 @@ comptime {
.layout = .Auto,
.fields = &.{.{
.name = "0",
- .field_type = u32,
+ .type = u32,
.default_value = null,
.is_comptime = true,
.alignment = 4,
@@ -45,7 +45,7 @@ comptime {
.layout = .Extern,
.fields = &.{.{
.name = "0",
- .field_type = u32,
+ .type = u32,
.default_value = null,
.is_comptime = true,
.alignment = 4,
@@ -59,7 +59,7 @@ comptime {
.layout = .Packed,
.fields = &.{.{
.name = "0",
- .field_type = u32,
+ .type = u32,
.default_value = null,
.is_comptime = true,
.alignment = 4,
diff --git a/test/cases/compile_errors/reify_type.Fn_with_is_generic_true.zig b/test/cases/compile_errors/reify_type.Fn_with_is_generic_true.zig
index cf80c9f4ba..abdccdf36d 100644
--- a/test/cases/compile_errors/reify_type.Fn_with_is_generic_true.zig
+++ b/test/cases/compile_errors/reify_type.Fn_with_is_generic_true.zig
@@ -5,7 +5,7 @@ const Foo = @Type(.{
.is_generic = true,
.is_var_args = false,
.return_type = u0,
- .args = &.{},
+ .params = &.{},
},
});
comptime { _ = Foo; }
diff --git a/test/cases/compile_errors/reify_type.Fn_with_is_var_args_true_and_non-C_callconv.zig b/test/cases/compile_errors/reify_type.Fn_with_is_var_args_true_and_non-C_callconv.zig
index 8328ee9b97..f3542d583a 100644
--- a/test/cases/compile_errors/reify_type.Fn_with_is_var_args_true_and_non-C_callconv.zig
+++ b/test/cases/compile_errors/reify_type.Fn_with_is_var_args_true_and_non-C_callconv.zig
@@ -5,7 +5,7 @@ const Foo = @Type(.{
.is_generic = false,
.is_var_args = true,
.return_type = u0,
- .args = &.{},
+ .params = &.{},
},
});
comptime { _ = Foo; }
diff --git a/test/cases/compile_errors/reify_type.Fn_with_return_type_null.zig b/test/cases/compile_errors/reify_type.Fn_with_return_type_null.zig
index f6587dcd7e..49335ab693 100644
--- a/test/cases/compile_errors/reify_type.Fn_with_return_type_null.zig
+++ b/test/cases/compile_errors/reify_type.Fn_with_return_type_null.zig
@@ -5,7 +5,7 @@ const Foo = @Type(.{
.is_generic = false,
.is_var_args = false,
.return_type = null,
- .args = &.{},
+ .params = &.{},
},
});
comptime { _ = Foo; }
diff --git a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig
index e72b783d83..60c6ce9a59 100644
--- a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig
+++ b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig
@@ -1,6 +1,5 @@
const Tag = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = bool,
.fields = &.{},
.decls = &.{},
diff --git a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig
index 1c237a17bd..896d689046 100644
--- a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig
+++ b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig
@@ -1,6 +1,5 @@
const Tag = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = undefined,
.fields = &.{},
.decls = &.{},
diff --git a/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_enum_field.zig b/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_enum_field.zig
index ccd0000494..96da0752df 100644
--- a/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_enum_field.zig
+++ b/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_enum_field.zig
@@ -1,6 +1,5 @@
const Tag = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u2,
.fields = &.{
.{ .name = "signed", .value = 0 },
@@ -16,8 +15,8 @@ const Tagged = @Type(.{
.layout = .Auto,
.tag_type = Tag,
.fields = &.{
- .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
- .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
+ .{ .name = "signed", .type = i32, .alignment = @alignOf(i32) },
+ .{ .name = "unsigned", .type = u32, .alignment = @alignOf(u32) },
},
.decls = &.{},
},
@@ -31,6 +30,6 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :14:16: error: enum field(s) missing in union
+// :13:16: error: enum field(s) missing in union
// :1:13: note: field 'arst' missing, declared here
// :1:13: note: enum declared here
diff --git a/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig b/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig
index 9d6170b9d0..559eb81fcd 100644
--- a/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig
+++ b/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig
@@ -1,6 +1,5 @@
const Tag = @Type(.{
.Enum = .{
- .layout = .Auto,
.tag_type = u1,
.fields = &.{
.{ .name = "signed", .value = 0 },
@@ -15,9 +14,9 @@ const Tagged = @Type(.{
.layout = .Auto,
.tag_type = Tag,
.fields = &.{
- .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
- .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
- .{ .name = "arst", .field_type = f32, .alignment = @alignOf(f32) },
+ .{ .name = "signed", .type = i32, .alignment = @alignOf(i32) },
+ .{ .name = "unsigned", .type = u32, .alignment = @alignOf(u32) },
+ .{ .name = "arst", .type = f32, .alignment = @alignOf(f32) },
},
.decls = &.{},
},
@@ -31,5 +30,5 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :13:16: error: no field named 'arst' in enum 'tmp.Tag'
+// :12:16: error: no field named 'arst' in enum 'tmp.Tag'
// :1:13: note: enum declared here
diff --git a/test/cases/compile_errors/reify_type_for_union_with_opaque_field.zig b/test/cases/compile_errors/reify_type_for_union_with_opaque_field.zig
index aba11031a4..df70934871 100644
--- a/test/cases/compile_errors/reify_type_for_union_with_opaque_field.zig
+++ b/test/cases/compile_errors/reify_type_for_union_with_opaque_field.zig
@@ -3,7 +3,7 @@ const Untagged = @Type(.{
.layout = .Auto,
.tag_type = null,
.fields = &.{
- .{ .name = "foo", .field_type = opaque {}, .alignment = 1 },
+ .{ .name = "foo", .type = opaque {}, .alignment = 1 },
},
.decls = &.{},
},
@@ -17,4 +17,4 @@ export fn entry() usize {
// target=native
//
// :1:18: error: opaque types have unknown size and therefore cannot be directly embedded in unions
-// :6:45: note: opaque declared here
+// :6:39: note: opaque declared here
diff --git a/test/cases/compile_errors/return_incompatible_generic_struct.zig b/test/cases/compile_errors/return_incompatible_generic_struct.zig
new file mode 100644
index 0000000000..f46d44d53f
--- /dev/null
+++ b/test/cases/compile_errors/return_incompatible_generic_struct.zig
@@ -0,0 +1,20 @@
+fn A(comptime T: type) type {
+ return struct { a: T };
+}
+fn B(comptime T: type) type {
+ return struct { b: T };
+}
+fn foo() A(u32) {
+ return B(u32){ .b = 1 };
+}
+export fn entry() void {
+ _ = foo();
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :8:18: error: expected type 'tmp.A(u32)', found 'tmp.B(u32)'
+// :5:12: note: struct declared here
+// :2:12: note: struct declared here
diff --git a/test/cases/compile_errors/stage1/obj/invalid_maybe_type.zig b/test/cases/compile_errors/stage1/obj/invalid_maybe_type.zig
deleted file mode 100644
index cb075c36b7..0000000000
--- a/test/cases/compile_errors/stage1/obj/invalid_maybe_type.zig
+++ /dev/null
@@ -1,9 +0,0 @@
-export fn f() void {
- if (true) |x| { _ = x; }
-}
-
-// error
-// backend=stage1
-// target=native
-//
-// tmp.zig:2:9: error: expected optional type, found 'bool'
diff --git a/test/cases/compile_errors/stage1/obj/load_vector_pointer_with_unknown_runtime_index.zig b/test/cases/compile_errors/stage1/obj/load_vector_pointer_with_unknown_runtime_index.zig
deleted file mode 100644
index 8d703b09e6..0000000000
--- a/test/cases/compile_errors/stage1/obj/load_vector_pointer_with_unknown_runtime_index.zig
+++ /dev/null
@@ -1,17 +0,0 @@
-export fn entry() void {
- var v: @import("std").meta.Vector(4, i32) = [_]i32{ 1, 5, 3, undefined };
-
- var i: u32 = 0;
- var x = loadv(&v[i]);
- _ = x;
-}
-
-fn loadv(ptr: anytype) i32 {
- return ptr.*;
-}
-
-// error
-// backend=stage1
-// target=native
-//
-// tmp.zig:10:12: error: unable to determine vector element index of type '*align(16:0:4:?) i32
diff --git a/test/cases/compile_errors/stage1/obj/store_vector_pointer_with_unknown_runtime_index.zig b/test/cases/compile_errors/stage1/obj/store_vector_pointer_with_unknown_runtime_index.zig
deleted file mode 100644
index 57e91631b1..0000000000
--- a/test/cases/compile_errors/stage1/obj/store_vector_pointer_with_unknown_runtime_index.zig
+++ /dev/null
@@ -1,16 +0,0 @@
-export fn entry() void {
- var v: @import("std").meta.Vector(4, i32) = [_]i32{ 1, 5, 3, undefined };
-
- var i: u32 = 0;
- storev(&v[i], 42);
-}
-
-fn storev(ptr: anytype, val: i32) void {
- ptr.* = val;
-}
-
-// error
-// backend=stage1
-// target=native
-//
-// tmp.zig:9:8: error: unable to determine vector element index of type '*align(16:0:4:?) i32
diff --git a/test/cases/compile_errors/store_vector_pointer_with_unknown_runtime_index.zig b/test/cases/compile_errors/store_vector_pointer_with_unknown_runtime_index.zig
new file mode 100644
index 0000000000..0007ff1327
--- /dev/null
+++ b/test/cases/compile_errors/store_vector_pointer_with_unknown_runtime_index.zig
@@ -0,0 +1,16 @@
+export fn entry() void {
+ var v: @Vector(4, i31) = [_]i31{ 1, 5, 3, undefined };
+
+ var i: u32 = 0;
+ storev(&v[i], 42);
+}
+
+fn storev(ptr: anytype, val: i31) void {
+ ptr.* = val;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :9:8: error: unable to determine vector element index of type '*align(16:0:4:?) i31'
diff --git a/test/cases/fn_typeinfo_passed_to_comptime_fn.zig b/test/cases/fn_typeinfo_passed_to_comptime_fn.zig
index 29f1d0c67b..31673e5b81 100644
--- a/test/cases/fn_typeinfo_passed_to_comptime_fn.zig
+++ b/test/cases/fn_typeinfo_passed_to_comptime_fn.zig
@@ -9,7 +9,7 @@ fn someFn(arg: ?*c_int) f64 {
return 8;
}
fn foo(comptime info: std.builtin.Type) !void {
- try std.testing.expect(info.Fn.args[0].arg_type.? == ?*c_int);
+ try std.testing.expect(info.Fn.params[0].type.? == ?*c_int);
}
// run
diff --git a/test/cases/plan9/hello_world_with_updates.1.zig b/test/cases/plan9/hello_world_with_updates.1.zig
index 4111a8dc08..410045ab79 100644
--- a/test/cases/plan9/hello_world_with_updates.1.zig
+++ b/test/cases/plan9/hello_world_with_updates.1.zig
@@ -5,6 +5,7 @@ pub fn main() void {
}
// run
+// target=x86_64-plan9
//
// Hello World
//
diff --git a/test/link.zig b/test/link.zig
index 7eec02e53a..5e26ae728d 100644
--- a/test/link.zig
+++ b/test/link.zig
@@ -42,6 +42,16 @@ fn addWasmCases(cases: *tests.StandaloneContext) void {
.requires_stage2 = true,
});
+ cases.addBuildFile("test/link/wasm/export/build.zig", .{
+ .build_modes = true,
+ .requires_stage2 = true,
+ });
+
+ // TODO: Fix open handle in wasm-linker refraining rename from working on Windows.
+ if (builtin.os.tag != .windows) {
+ cases.addBuildFile("test/link/wasm/export-data/build.zig", .{});
+ }
+
cases.addBuildFile("test/link/wasm/extern/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
@@ -53,6 +63,11 @@ fn addWasmCases(cases: *tests.StandaloneContext) void {
.requires_stage2 = true,
});
+ cases.addBuildFile("test/link/wasm/function-table/build.zig", .{
+ .build_modes = true,
+ .requires_stage2 = true,
+ });
+
cases.addBuildFile("test/link/wasm/infer-features/build.zig", .{
.requires_stage2 = true,
});
@@ -165,11 +180,21 @@ fn addMachOCases(cases: *tests.StandaloneContext) void {
.requires_symlinks = true,
});
+ cases.addBuildFile("test/link/macho/strict_validation/build.zig", .{
+ .build_modes = true,
+ .requires_symlinks = true,
+ });
+
cases.addBuildFile("test/link/macho/tls/build.zig", .{
.build_modes = true,
.requires_symlinks = true,
});
+ cases.addBuildFile("test/link/macho/uuid/build.zig", .{
+ .build_modes = false,
+ .requires_symlinks = true,
+ });
+
cases.addBuildFile("test/link/macho/weak_library/build.zig", .{
.build_modes = true,
.requires_symlinks = true,
diff --git a/test/link/macho/strict_validation/build.zig b/test/link/macho/strict_validation/build.zig
new file mode 100644
index 0000000000..0ea150252c
--- /dev/null
+++ b/test/link/macho/strict_validation/build.zig
@@ -0,0 +1,119 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Builder = std.build.Builder;
+const LibExeObjectStep = std.build.LibExeObjStep;
+
+pub fn build(b: *Builder) void {
+ const mode = b.standardReleaseOptions();
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+
+ const test_step = b.step("test", "Test");
+ test_step.dependOn(b.getInstallStep());
+
+ const exe = b.addExecutable("main", "main.zig");
+ exe.setBuildMode(mode);
+ exe.setTarget(target);
+ exe.linkLibC();
+
+ const check_exe = exe.checkObject(.macho);
+
+ check_exe.checkStart("cmd SEGMENT_64");
+ check_exe.checkNext("segname __LINKEDIT");
+ check_exe.checkNext("fileoff {fileoff}");
+ check_exe.checkNext("filesz {filesz}");
+
+ check_exe.checkStart("cmd DYLD_INFO_ONLY");
+ check_exe.checkNext("rebaseoff {rebaseoff}");
+ check_exe.checkNext("rebasesize {rebasesize}");
+ check_exe.checkNext("bindoff {bindoff}");
+ check_exe.checkNext("bindsize {bindsize}");
+ check_exe.checkNext("lazybindoff {lazybindoff}");
+ check_exe.checkNext("lazybindsize {lazybindsize}");
+ check_exe.checkNext("exportoff {exportoff}");
+ check_exe.checkNext("exportsize {exportsize}");
+
+ check_exe.checkStart("cmd FUNCTION_STARTS");
+ check_exe.checkNext("dataoff {fstartoff}");
+ check_exe.checkNext("datasize {fstartsize}");
+
+ check_exe.checkStart("cmd DATA_IN_CODE");
+ check_exe.checkNext("dataoff {diceoff}");
+ check_exe.checkNext("datasize {dicesize}");
+
+ check_exe.checkStart("cmd SYMTAB");
+ check_exe.checkNext("symoff {symoff}");
+ check_exe.checkNext("nsyms {symnsyms}");
+ check_exe.checkNext("stroff {stroff}");
+ check_exe.checkNext("strsize {strsize}");
+
+ check_exe.checkStart("cmd DYSYMTAB");
+ check_exe.checkNext("indirectsymoff {dysymoff}");
+ check_exe.checkNext("nindirectsyms {dysymnsyms}");
+
+ switch (builtin.cpu.arch) {
+ .aarch64 => {
+ check_exe.checkStart("cmd CODE_SIGNATURE");
+ check_exe.checkNext("dataoff {codesigoff}");
+ check_exe.checkNext("datasize {codesigsize}");
+ },
+ .x86_64 => {},
+ else => unreachable,
+ }
+
+ // DYLD_INFO_ONLY subsections are in order: rebase < bind < lazy < export,
+ // and there are no gaps between them
+ check_exe.checkComputeCompare("rebaseoff rebasesize +", .{ .op = .eq, .value = .{ .variable = "bindoff" } });
+ check_exe.checkComputeCompare("bindoff bindsize +", .{ .op = .eq, .value = .{ .variable = "lazybindoff" } });
+ check_exe.checkComputeCompare("lazybindoff lazybindsize +", .{ .op = .eq, .value = .{ .variable = "exportoff" } });
+
+ // FUNCTION_STARTS directly follows DYLD_INFO_ONLY (no gap)
+ check_exe.checkComputeCompare("exportoff exportsize +", .{ .op = .eq, .value = .{ .variable = "fstartoff" } });
+
+ // DATA_IN_CODE directly follows FUNCTION_STARTS (no gap)
+ check_exe.checkComputeCompare("fstartoff fstartsize +", .{ .op = .eq, .value = .{ .variable = "diceoff" } });
+
+ // SYMTAB directly follows DATA_IN_CODE (no gap)
+ check_exe.checkComputeCompare("diceoff dicesize +", .{ .op = .eq, .value = .{ .variable = "symoff" } });
+
+ // DYSYMTAB directly follows SYMTAB (no gap)
+ check_exe.checkComputeCompare("symnsyms 16 symoff * +", .{ .op = .eq, .value = .{ .variable = "dysymoff" } });
+
+ // STRTAB follows DYSYMTAB with possible gap
+ check_exe.checkComputeCompare("dysymnsyms 4 dysymoff * +", .{ .op = .lte, .value = .{ .variable = "stroff" } });
+
+ // all LINKEDIT sections apart from CODE_SIGNATURE are 8-bytes aligned
+ check_exe.checkComputeCompare("rebaseoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("bindoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("lazybindoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("exportoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("fstartoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("diceoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("symoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("stroff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_exe.checkComputeCompare("dysymoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+
+ switch (builtin.cpu.arch) {
+ .aarch64 => {
+ // LINKEDIT segment does not extend beyond, or does not include, CODE_SIGNATURE data
+ check_exe.checkComputeCompare("fileoff filesz codesigoff codesigsize + - -", .{
+ .op = .eq,
+ .value = .{ .literal = 0 },
+ });
+
+ // CODE_SIGNATURE data offset is 16-bytes aligned
+ check_exe.checkComputeCompare("codesigoff 16 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+ },
+ .x86_64 => {
+ // LINKEDIT segment does not extend beyond, or does not include, strtab data
+ check_exe.checkComputeCompare("fileoff filesz stroff strsize + - -", .{
+ .op = .eq,
+ .value = .{ .literal = 0 },
+ });
+ },
+ else => unreachable,
+ }
+
+ const run = check_exe.runAndCompare();
+ run.expectStdOutEqual("Hello!\n");
+ test_step.dependOn(&run.step);
+}
diff --git a/test/link/macho/strict_validation/main.zig b/test/link/macho/strict_validation/main.zig
new file mode 100644
index 0000000000..6510e13fd7
--- /dev/null
+++ b/test/link/macho/strict_validation/main.zig
@@ -0,0 +1,6 @@
+const std = @import("std");
+
+pub fn main() !void {
+ const stdout = std.io.getStdOut().writer();
+ try stdout.writeAll("Hello!\n");
+}
diff --git a/test/link/macho/uuid/build.zig b/test/link/macho/uuid/build.zig
new file mode 100644
index 0000000000..f188b1fad8
--- /dev/null
+++ b/test/link/macho/uuid/build.zig
@@ -0,0 +1,62 @@
+const std = @import("std");
+const Builder = std.build.Builder;
+const LibExeObjectStep = std.build.LibExeObjStep;
+
+pub fn build(b: *Builder) void {
+ const test_step = b.step("test", "Test");
+ test_step.dependOn(b.getInstallStep());
+
+ // We force cross-compilation to ensure we always pick a generic CPU with constant set of CPU features.
+ const aarch64_macos = std.zig.CrossTarget{
+ .cpu_arch = .aarch64,
+ .os_tag = .macos,
+ };
+
+ testUuid(b, test_step, .ReleaseSafe, aarch64_macos, "46b333df88f5314686fc0cba3b939ca8");
+ testUuid(b, test_step, .ReleaseFast, aarch64_macos, "46b333df88f5314686fc0cba3b939ca8");
+ testUuid(b, test_step, .ReleaseSmall, aarch64_macos, "46b333df88f5314686fc0cba3b939ca8");
+
+ const x86_64_macos = std.zig.CrossTarget{
+ .cpu_arch = .x86_64,
+ .os_tag = .macos,
+ };
+
+ testUuid(b, test_step, .ReleaseSafe, x86_64_macos, "342ac765194131e1bad5692b9e0e54a4");
+ testUuid(b, test_step, .ReleaseFast, x86_64_macos, "342ac765194131e1bad5692b9e0e54a4");
+ testUuid(b, test_step, .ReleaseSmall, x86_64_macos, "f119310e24773ecf8ec42e09d0379dad");
+}
+
+fn testUuid(
+ b: *Builder,
+ test_step: *std.build.Step,
+ mode: std.builtin.Mode,
+ target: std.zig.CrossTarget,
+ comptime exp: []const u8,
+) void {
+ // The calculated UUID value is independent of debug info and so it should
+ // stay the same across builds.
+ {
+ const dylib = simpleDylib(b, mode, target);
+ const check_dylib = dylib.checkObject(.macho);
+ check_dylib.checkStart("cmd UUID");
+ check_dylib.checkNext("uuid " ++ exp);
+ test_step.dependOn(&check_dylib.step);
+ }
+ {
+ const dylib = simpleDylib(b, mode, target);
+ dylib.strip = true;
+ const check_dylib = dylib.checkObject(.macho);
+ check_dylib.checkStart("cmd UUID");
+ check_dylib.checkNext("uuid " ++ exp);
+ test_step.dependOn(&check_dylib.step);
+ }
+}
+
+fn simpleDylib(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
+ const dylib = b.addSharedLibrary("test", null, b.version(1, 0, 0));
+ dylib.setTarget(target);
+ dylib.setBuildMode(mode);
+ dylib.addCSourceFile("test.c", &.{});
+ dylib.linkLibC();
+ return dylib;
+}
diff --git a/test/link/macho/uuid/test.c b/test/link/macho/uuid/test.c
new file mode 100644
index 0000000000..6f23a1a926
--- /dev/null
+++ b/test/link/macho/uuid/test.c
@@ -0,0 +1,2 @@
+void test() {}
+
diff --git a/test/link/wasm/bss/build.zig b/test/link/wasm/bss/build.zig
index c9bc1aa106..e234a3f402 100644
--- a/test/link/wasm/bss/build.zig
+++ b/test/link/wasm/bss/build.zig
@@ -26,8 +26,7 @@ pub fn build(b: *Builder) void {
check_lib.checkNext("name memory"); // as per linker specification
// since we are importing memory, ensure it's not exported
- check_lib.checkStart("Section export");
- check_lib.checkNext("entries 1"); // we're exporting function 'foo' so only 1 entry
+ check_lib.checkNotPresent("Section export");
// validate the name of the stack pointer
check_lib.checkStart("Section custom");
diff --git a/test/link/wasm/export-data/build.zig b/test/link/wasm/export-data/build.zig
new file mode 100644
index 0000000000..283566dab3
--- /dev/null
+++ b/test/link/wasm/export-data/build.zig
@@ -0,0 +1,39 @@
+const std = @import("std");
+const Builder = std.build.Builder;
+
+pub fn build(b: *Builder) void {
+ const test_step = b.step("test", "Test");
+ test_step.dependOn(b.getInstallStep());
+
+ const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
+ lib.setBuildMode(.ReleaseSafe); // to make the output deterministic in address positions
+ lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ lib.use_lld = false;
+ lib.export_symbol_names = &.{ "foo", "bar" };
+ lib.global_base = 0; // put data section at address 0 to make data symbols easier to parse
+
+ const check_lib = lib.checkObject(.wasm);
+
+ check_lib.checkStart("Section global");
+ check_lib.checkNext("entries 3");
+ check_lib.checkNext("type i32"); // stack pointer so skip other fields
+ check_lib.checkNext("type i32");
+ check_lib.checkNext("mutable false");
+ check_lib.checkNext("i32.const {foo_address}");
+ check_lib.checkNext("type i32");
+ check_lib.checkNext("mutable false");
+ check_lib.checkNext("i32.const {bar_address}");
+ check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 0 } });
+ check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 4 } });
+
+ check_lib.checkStart("Section export");
+ check_lib.checkNext("entries 3");
+ check_lib.checkNext("name foo");
+ check_lib.checkNext("kind global");
+ check_lib.checkNext("index 1");
+ check_lib.checkNext("name bar");
+ check_lib.checkNext("kind global");
+ check_lib.checkNext("index 2");
+
+ test_step.dependOn(&check_lib.step);
+}
diff --git a/test/link/wasm/export-data/lib.zig b/test/link/wasm/export-data/lib.zig
new file mode 100644
index 0000000000..dffce185fa
--- /dev/null
+++ b/test/link/wasm/export-data/lib.zig
@@ -0,0 +1,2 @@
+export const foo: u32 = 0xbbbbbbbb;
+export const bar: u32 = 0xbbbbbbbb;
diff --git a/test/link/wasm/export/build.zig b/test/link/wasm/export/build.zig
new file mode 100644
index 0000000000..181e77e296
--- /dev/null
+++ b/test/link/wasm/export/build.zig
@@ -0,0 +1,48 @@
+const std = @import("std");
+
+pub fn build(b: *std.build.Builder) void {
+ const mode = b.standardReleaseOptions();
+
+ const no_export = b.addSharedLibrary("no-export", "main.zig", .unversioned);
+ no_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ no_export.setBuildMode(mode);
+ no_export.use_llvm = false;
+ no_export.use_lld = false;
+
+ const dynamic_export = b.addSharedLibrary("dynamic", "main.zig", .unversioned);
+ dynamic_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ dynamic_export.setBuildMode(mode);
+ dynamic_export.rdynamic = true;
+ dynamic_export.use_llvm = false;
+ dynamic_export.use_lld = false;
+
+ const force_export = b.addSharedLibrary("force", "main.zig", .unversioned);
+ force_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ force_export.setBuildMode(mode);
+ force_export.export_symbol_names = &.{"foo"};
+ force_export.use_llvm = false;
+ force_export.use_lld = false;
+
+ const check_no_export = no_export.checkObject(.wasm);
+ check_no_export.checkStart("Section export");
+ check_no_export.checkNext("entries 1");
+ check_no_export.checkNext("name memory");
+ check_no_export.checkNext("kind memory");
+
+ const check_dynamic_export = dynamic_export.checkObject(.wasm);
+ check_dynamic_export.checkStart("Section export");
+ check_dynamic_export.checkNext("entries 2");
+ check_dynamic_export.checkNext("name foo");
+ check_dynamic_export.checkNext("kind function");
+
+ const check_force_export = force_export.checkObject(.wasm);
+ check_force_export.checkStart("Section export");
+ check_force_export.checkNext("entries 2");
+ check_force_export.checkNext("name foo");
+ check_force_export.checkNext("kind function");
+
+ const test_step = b.step("test", "Run linker test");
+ test_step.dependOn(&check_no_export.step);
+ test_step.dependOn(&check_dynamic_export.step);
+ test_step.dependOn(&check_force_export.step);
+}
diff --git a/test/link/wasm/export/main.zig b/test/link/wasm/export/main.zig
new file mode 100644
index 0000000000..0e416dbf18
--- /dev/null
+++ b/test/link/wasm/export/main.zig
@@ -0,0 +1 @@
+export fn foo() void {}
diff --git a/test/link/wasm/extern-mangle/build.zig b/test/link/wasm/extern-mangle/build.zig
index f2916c35a7..ae46117f18 100644
--- a/test/link/wasm/extern-mangle/build.zig
+++ b/test/link/wasm/extern-mangle/build.zig
@@ -10,6 +10,8 @@ pub fn build(b: *Builder) void {
const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
lib.setBuildMode(mode);
lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ lib.import_symbols = true; // import `a` and `b`
+ lib.rdynamic = true; // export `foo`
lib.install();
const check_lib = lib.checkObject(.wasm);
diff --git a/test/link/wasm/function-table/build.zig b/test/link/wasm/function-table/build.zig
new file mode 100644
index 0000000000..f7572bd6b1
--- /dev/null
+++ b/test/link/wasm/function-table/build.zig
@@ -0,0 +1,63 @@
+const std = @import("std");
+const Builder = std.build.Builder;
+
+pub fn build(b: *Builder) void {
+ const mode = b.standardReleaseOptions();
+
+ const test_step = b.step("test", "Test");
+ test_step.dependOn(b.getInstallStep());
+
+ const import_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
+ import_table.setBuildMode(mode);
+ import_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ import_table.use_llvm = false;
+ import_table.use_lld = false;
+ import_table.import_table = true;
+
+ const export_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
+ export_table.setBuildMode(mode);
+ export_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ export_table.use_llvm = false;
+ export_table.use_lld = false;
+ export_table.export_table = true;
+
+ const regular_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
+ regular_table.setBuildMode(mode);
+ regular_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ regular_table.use_llvm = false;
+ regular_table.use_lld = false;
+
+ const check_import = import_table.checkObject(.wasm);
+ const check_export = export_table.checkObject(.wasm);
+ const check_regular = regular_table.checkObject(.wasm);
+
+ check_import.checkStart("Section import");
+ check_import.checkNext("entries 1");
+ check_import.checkNext("module env");
+ check_import.checkNext("name __indirect_function_table");
+ check_import.checkNext("kind table");
+ check_import.checkNext("type funcref");
+ check_import.checkNext("min 1"); // 1 function pointer
+ check_import.checkNotPresent("max"); // when importing, we do not provide a max
+ check_import.checkNotPresent("Section table"); // we're importing it
+
+ check_export.checkStart("Section export");
+ check_export.checkNext("entries 2");
+ check_export.checkNext("name __indirect_function_table"); // as per linker specification
+ check_export.checkNext("kind table");
+
+ check_regular.checkStart("Section table");
+ check_regular.checkNext("entries 1");
+ check_regular.checkNext("type funcref");
+ check_regular.checkNext("min 2"); // index starts at 1 & 1 function pointer = 2.
+ check_regular.checkNext("max 2");
+ check_regular.checkStart("Section element");
+ check_regular.checkNext("entries 1");
+ check_regular.checkNext("table index 0");
+ check_regular.checkNext("i32.const 1"); // we want to start function indexes at 1
+ check_regular.checkNext("indexes 1"); // 1 function pointer
+
+ test_step.dependOn(&check_import.step);
+ test_step.dependOn(&check_export.step);
+ test_step.dependOn(&check_regular.step);
+}
diff --git a/test/link/wasm/function-table/lib.zig b/test/link/wasm/function-table/lib.zig
new file mode 100644
index 0000000000..ed7a85b2db
--- /dev/null
+++ b/test/link/wasm/function-table/lib.zig
@@ -0,0 +1,7 @@
+var func: *const fn () void = &bar;
+
+export fn foo() void {
+ func();
+}
+
+fn bar() void {}
diff --git a/test/stack_traces.zig b/test/stack_traces.zig
index afd08c7ba8..0ace004bc6 100644
--- a/test/stack_traces.zig
+++ b/test/stack_traces.zig
@@ -5,13 +5,13 @@ const tests = @import("tests.zig");
pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "return",
- .source =
+ .source =
\\pub fn main() !void {
\\ return error.TheSkyIsFalling;
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in main (test)
\\ return error.TheSkyIsFalling;
@@ -24,7 +24,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
@@ -33,13 +33,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
@@ -48,7 +48,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "try return",
- .source =
+ .source =
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
@@ -58,7 +58,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
@@ -73,7 +73,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.exclude_os = .{
.windows, // TODO
},
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
@@ -85,13 +85,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
@@ -99,7 +99,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
});
cases.addCase(.{
.name = "non-error return pops error trace",
- .source =
+ .source =
\\fn bar() !void {
\\ return error.UhOh;
\\}
@@ -116,7 +116,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: UnrelatedError
\\source.zig:13:5: [address] in main (test)
\\ return error.UnrelatedError;
@@ -129,7 +129,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: UnrelatedError
\\source.zig:13:5: [address] in [function]
\\ return error.UnrelatedError;
@@ -138,13 +138,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: UnrelatedError
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: UnrelatedError
\\
,
@@ -153,7 +153,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "continue in while loop",
- .source =
+ .source =
\\fn foo() !void {
\\ return error.UhOh;
\\}
@@ -167,7 +167,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: UnrelatedError
\\source.zig:10:5: [address] in main (test)
\\ return error.UnrelatedError;
@@ -180,7 +180,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: UnrelatedError
\\source.zig:10:5: [address] in [function]
\\ return error.UnrelatedError;
@@ -189,13 +189,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: UnrelatedError
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: UnrelatedError
\\
,
@@ -204,7 +204,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "try return + handled catch/if-else",
- .source =
+ .source =
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
@@ -218,7 +218,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
@@ -234,7 +234,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
@@ -246,13 +246,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
@@ -261,7 +261,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "break from inline loop pops error return trace",
- .source =
+ .source =
\\fn foo() !void { return error.FooBar; }
\\
\\pub fn main() !void {
@@ -277,7 +277,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: BadTime
\\source.zig:12:5: [address] in main (test)
\\ return error.BadTime;
@@ -290,7 +290,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: BadTime
\\source.zig:12:5: [address] in [function]
\\ return error.BadTime;
@@ -299,13 +299,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: BadTime
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: BadTime
\\
,
@@ -314,7 +314,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "catch and re-throw error",
- .source =
+ .source =
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
@@ -324,7 +324,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\source.zig:2:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
@@ -340,7 +340,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
@@ -352,13 +352,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\
,
@@ -367,7 +367,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "errors stored in var do not contribute to error trace",
- .source =
+ .source =
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
@@ -382,7 +382,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: SomethingUnrelatedWentWrong
\\source.zig:11:5: [address] in main (test)
\\ return error.SomethingUnrelatedWentWrong;
@@ -395,7 +395,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: SomethingUnrelatedWentWrong
\\source.zig:11:5: [address] in [function]
\\ return error.SomethingUnrelatedWentWrong;
@@ -404,13 +404,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: SomethingUnrelatedWentWrong
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: SomethingUnrelatedWentWrong
\\
,
@@ -419,7 +419,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "error stored in const has trace preserved for duration of block",
- .source =
+ .source =
\\fn foo() !void { return error.TheSkyIsFalling; }
\\fn bar() !void { return error.InternalError; }
\\fn baz() !void { return error.UnexpectedReality; }
@@ -446,7 +446,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: StillUnresolved
\\source.zig:1:18: [address] in foo (test)
\\fn foo() !void { return error.TheSkyIsFalling; }
@@ -465,7 +465,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO
.linux, // defeated by aggressive inlining
},
- .expect =
+ .expect =
\\error: StillUnresolved
\\source.zig:1:18: [address] in [function]
\\fn foo() !void { return error.TheSkyIsFalling; }
@@ -480,13 +480,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: StillUnresolved
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: StillUnresolved
\\
,
@@ -495,7 +495,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "error passed to function has its trace preserved for duration of the call",
- .source =
+ .source =
\\pub fn expectError(expected_error: anyerror, actual_error: anyerror!void) !void {
\\ actual_error catch |err| {
\\ if (err == expected_error) return {};
@@ -516,7 +516,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: TestExpectedError
\\source.zig:9:18: [address] in foo (test)
\\fn foo() !void { return error.Foo; }
@@ -534,7 +534,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.exclude_os = .{
.windows, // TODO
},
- .expect =
+ .expect =
\\error: TestExpectedError
\\source.zig:9:18: [address] in [function]
\\fn foo() !void { return error.Foo; }
@@ -549,13 +549,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: TestExpectedError
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: TestExpectedError
\\
,
@@ -564,7 +564,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "try return from within catch",
- .source =
+ .source =
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
@@ -580,7 +580,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\source.zig:2:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
@@ -598,7 +598,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.exclude_os = .{
.windows, // TODO
},
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
@@ -613,13 +613,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\
,
@@ -628,7 +628,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "try return from within if-else",
- .source =
+ .source =
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
@@ -644,7 +644,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\source.zig:2:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
@@ -662,7 +662,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.exclude_os = .{
.windows, // TODO
},
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
@@ -677,13 +677,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: AndMyCarIsOutOfGas
\\
,
@@ -692,7 +692,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.name = "try try return return",
- .source =
+ .source =
\\fn foo() !void {
\\ try bar();
\\}
@@ -710,7 +710,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:10:5: [address] in make_error (test)
\\ return error.TheSkyIsFalling;
@@ -731,7 +731,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.exclude_os = .{
.windows, // TODO
},
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\source.zig:10:5: [address] in [function]
\\ return error.TheSkyIsFalling;
@@ -749,13 +749,13 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
,
},
.ReleaseFast = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
},
.ReleaseSmall = .{
- .expect =
+ .expect =
\\error: TheSkyIsFalling
\\
,
@@ -768,7 +768,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
.windows, // TODO intermittent failures
},
.name = "dumpCurrentStackTrace",
- .source =
+ .source =
\\const std = @import("std");
\\
\\fn bar() void {
@@ -783,7 +783,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
\\}
,
.Debug = .{
- .expect =
+ .expect =
\\source.zig:7:8: [address] in foo (test)
\\ bar();
\\ ^
diff --git a/test/standalone.zig b/test/standalone.zig
index 8605c2ad7d..1a51e183c0 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -52,7 +52,10 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
if (builtin.zig_backend == .stage1) { // https://github.com/ziglang/zig/issues/12194
cases.addBuildFile("test/standalone/issue_9812/build.zig", .{});
}
- cases.addBuildFile("test/standalone/issue_11595/build.zig", .{});
+ if (builtin.os.tag != .windows) {
+ // https://github.com/ziglang/zig/issues/12419
+ cases.addBuildFile("test/standalone/issue_11595/build.zig", .{});
+ }
if (builtin.os.tag != .wasi and
// https://github.com/ziglang/zig/issues/13550
@@ -63,6 +66,10 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/standalone/load_dynamic_library/build.zig", .{});
}
+ if (builtin.os.tag == .windows) {
+ cases.addBuildFile("test/standalone/windows_spawn/build.zig", .{});
+ }
+
cases.addBuildFile("test/standalone/c_compiler/build.zig", .{
.build_modes = true,
.cross_targets = true,
diff --git a/test/standalone/windows_spawn/build.zig b/test/standalone/windows_spawn/build.zig
new file mode 100644
index 0000000000..10a1132d3a
--- /dev/null
+++ b/test/standalone/windows_spawn/build.zig
@@ -0,0 +1,16 @@
+const Builder = @import("std").build.Builder;
+
+pub fn build(b: *Builder) void {
+ const mode = b.standardReleaseOptions();
+
+ const hello = b.addExecutable("hello", "hello.zig");
+ hello.setBuildMode(mode);
+
+ const main = b.addExecutable("main", "main.zig");
+ main.setBuildMode(mode);
+ const run = main.run();
+ run.addArtifactArg(hello);
+
+ const test_step = b.step("test", "Test it");
+ test_step.dependOn(&run.step);
+}
diff --git a/test/standalone/windows_spawn/hello.zig b/test/standalone/windows_spawn/hello.zig
new file mode 100644
index 0000000000..dcf917c430
--- /dev/null
+++ b/test/standalone/windows_spawn/hello.zig
@@ -0,0 +1,6 @@
+const std = @import("std");
+
+pub fn main() !void {
+ const stdout = std.io.getStdOut().writer();
+ try stdout.writeAll("hello from exe\n");
+}
diff --git a/test/standalone/windows_spawn/main.zig b/test/standalone/windows_spawn/main.zig
new file mode 100644
index 0000000000..c0cfd15133
--- /dev/null
+++ b/test/standalone/windows_spawn/main.zig
@@ -0,0 +1,170 @@
+const std = @import("std");
+const windows = std.os.windows;
+const utf16Literal = std.unicode.utf8ToUtf16LeStringLiteral;
+
+pub fn main() anyerror!void {
+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+ defer if (gpa.deinit()) @panic("found memory leaks");
+ const allocator = gpa.allocator();
+
+ var it = try std.process.argsWithAllocator(allocator);
+ defer it.deinit();
+ _ = it.next() orelse unreachable; // skip binary name
+ const hello_exe_cache_path = it.next() orelse unreachable;
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const tmp_absolute_path = try tmp.dir.realpathAlloc(allocator, ".");
+ defer allocator.free(tmp_absolute_path);
+ const tmp_absolute_path_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, tmp_absolute_path);
+ defer allocator.free(tmp_absolute_path_w);
+ const cwd_absolute_path = try std.fs.cwd().realpathAlloc(allocator, ".");
+ defer allocator.free(cwd_absolute_path);
+ const tmp_relative_path = try std.fs.path.relative(allocator, cwd_absolute_path, tmp_absolute_path);
+ defer allocator.free(tmp_relative_path);
+
+ // Clear PATH
+ std.debug.assert(std.os.windows.kernel32.SetEnvironmentVariableW(
+ utf16Literal("PATH"),
+ null,
+ ) == windows.TRUE);
+
+ // Set PATHEXT to something predictable
+ std.debug.assert(std.os.windows.kernel32.SetEnvironmentVariableW(
+ utf16Literal("PATHEXT"),
+ utf16Literal(".COM;.EXE;.BAT;.CMD;.JS"),
+ ) == windows.TRUE);
+
+ // No PATH, so it should fail to find anything not in the cwd
+ try testExecError(error.FileNotFound, allocator, "something_missing");
+
+ std.debug.assert(std.os.windows.kernel32.SetEnvironmentVariableW(
+ utf16Literal("PATH"),
+ tmp_absolute_path_w,
+ ) == windows.TRUE);
+
+ // Move hello.exe into the tmp dir which is now added to the path
+ try std.fs.cwd().copyFile(hello_exe_cache_path, tmp.dir, "hello.exe", .{});
+
+ // with extension should find the .exe (case insensitive)
+ try testExec(allocator, "HeLLo.exe", "hello from exe\n");
+ // without extension should find the .exe (case insensitive)
+ try testExec(allocator, "heLLo", "hello from exe\n");
+
+ // now add a .bat
+ try tmp.dir.writeFile("hello.bat", "@echo hello from bat");
+ // and a .cmd
+ try tmp.dir.writeFile("hello.cmd", "@echo hello from cmd");
+
+ // with extension should find the .bat (case insensitive)
+ try testExec(allocator, "heLLo.bat", "hello from bat\r\n");
+ // with extension should find the .cmd (case insensitive)
+ try testExec(allocator, "heLLo.cmd", "hello from cmd\r\n");
+ // without extension should find the .exe (since it's first in PATHEXT)
+ try testExec(allocator, "heLLo", "hello from exe\n");
+
+ // now rename the exe to not have an extension
+ try tmp.dir.rename("hello.exe", "hello");
+
+ // with extension should now fail
+ try testExecError(error.FileNotFound, allocator, "hello.exe");
+ // without extension should succeed (case insensitive)
+ try testExec(allocator, "heLLo", "hello from exe\n");
+
+ try tmp.dir.makeDir("something");
+ try tmp.dir.rename("hello", "something/hello.exe");
+
+ const relative_path_no_ext = try std.fs.path.join(allocator, &.{ tmp_relative_path, "something/hello" });
+ defer allocator.free(relative_path_no_ext);
+
+ // Giving a full relative path to something/hello should work
+ try testExec(allocator, relative_path_no_ext, "hello from exe\n");
+ // But commands with path separators get excluded from PATH searching, so this will fail
+ try testExecError(error.FileNotFound, allocator, "something/hello");
+
+ // Now that .BAT is the first PATHEXT that should be found, this should succeed
+ try testExec(allocator, "heLLo", "hello from bat\r\n");
+
+ // Add a hello.exe that is not a valid executable
+ try tmp.dir.writeFile("hello.exe", "invalid");
+
+ // Trying to execute it with extension will give InvalidExe. This is a special
+ // case for .EXE extensions, where if they ever try to get executed but they are
+ // invalid, that gets treated as a fatal error wherever they are found and InvalidExe
+ // is returned immediately.
+ try testExecError(error.InvalidExe, allocator, "hello.exe");
+ // Same thing applies to the command with no extension--even though there is a
+ // hello.bat that could be executed, it should stop after it tries executing
+ // hello.exe and getting InvalidExe.
+ try testExecError(error.InvalidExe, allocator, "hello");
+
+ // If we now rename hello.exe to have no extension, it will behave differently
+ try tmp.dir.rename("hello.exe", "hello");
+
+ // Now, trying to execute it without an extension should treat InvalidExe as recoverable
+ // and skip over it and find hello.bat and execute that
+ try testExec(allocator, "hello", "hello from bat\r\n");
+
+ // If we rename the invalid exe to something else
+ try tmp.dir.rename("hello", "goodbye");
+ // Then we should now get FileNotFound when trying to execute 'goodbye',
+ // since that is what the original error will be after searching for 'goodbye'
+ // in the cwd. It will try to execute 'goodbye' from the PATH but the InvalidExe error
+ // should be ignored in this case.
+ try testExecError(error.FileNotFound, allocator, "goodbye");
+
+ // Now let's set the tmp dir as the cwd and set the path to only include the "something" sub dir
+ try tmp.dir.setAsCwd();
+ const something_subdir_abs_path = try std.mem.concatWithSentinel(allocator, u16, &.{ tmp_absolute_path_w, utf16Literal("\\something") }, 0);
+ defer allocator.free(something_subdir_abs_path);
+
+ std.debug.assert(std.os.windows.kernel32.SetEnvironmentVariableW(
+ utf16Literal("PATH"),
+ something_subdir_abs_path,
+ ) == windows.TRUE);
+
+ // Now trying to execute goodbye should give error.InvalidExe since it's the original
+ // error that we got when trying within the cwd
+ try testExecError(error.InvalidExe, allocator, "goodbye");
+
+ // hello should still find the .bat
+ try testExec(allocator, "hello", "hello from bat\r\n");
+
+ // If we rename something/hello.exe to something/goodbye.exe
+ try tmp.dir.rename("something/hello.exe", "something/goodbye.exe");
+ // And try to execute goodbye, then the one in something should be found
+ // since the one in cwd is an invalid executable
+ try testExec(allocator, "goodbye", "hello from exe\n");
+
+ // If we use an absolute path to execute the invalid goodbye
+ const goodbye_abs_path = try std.mem.join(allocator, "\\", &.{ tmp_absolute_path, "goodbye" });
+ defer allocator.free(goodbye_abs_path);
+ // then the PATH should not be searched and we should get InvalidExe
+ try testExecError(error.InvalidExe, allocator, goodbye_abs_path);
+
+ // If we try to exec but provide a cwd that is an absolute path, the PATH
+ // should still be searched and the goodbye.exe in something should be found.
+ try testExecWithCwd(allocator, "goodbye", tmp_absolute_path, "hello from exe\n");
+}
+
+fn testExecError(err: anyerror, allocator: std.mem.Allocator, command: []const u8) !void {
+ return std.testing.expectError(err, testExec(allocator, command, ""));
+}
+
+fn testExec(allocator: std.mem.Allocator, command: []const u8, expected_stdout: []const u8) !void {
+ return testExecWithCwd(allocator, command, null, expected_stdout);
+}
+
+fn testExecWithCwd(allocator: std.mem.Allocator, command: []const u8, cwd: ?[]const u8, expected_stdout: []const u8) !void {
+ var result = try std.ChildProcess.exec(.{
+ .allocator = allocator,
+ .argv = &[_][]const u8{command},
+ .cwd = cwd,
+ });
+ defer allocator.free(result.stdout);
+ defer allocator.free(result.stderr);
+
+ try std.testing.expectEqualStrings("", result.stderr);
+ try std.testing.expectEqualStrings(expected_stdout, result.stdout);
+}
diff --git a/tools/extract-grammar.zig b/tools/extract-grammar.zig
new file mode 100644
index 0000000000..8e8a17bacf
--- /dev/null
+++ b/tools/extract-grammar.zig
@@ -0,0 +1,100 @@
+//! Extract the "de facto" Zig Grammar from the parser in lib/std/zig/parse.zig.
+//!
+//! The generated file must be edited by hand, in order to remove normal doc-comments.
+
+const std = @import("std");
+const fs = std.fs;
+const heap = std.heap;
+const io = std.io;
+const mem = std.mem;
+const process = std.process;
+const zig = std.zig;
+
+const Buffer = struct {
+ const buf_size = 4096;
+
+ buf: [buf_size]u8 = undefined,
+ pos: usize = 0,
+
+ pub fn append(self: *Buffer, src: []const u8) !void {
+ if (self.pos + src.len > buf_size) {
+ return error.BufferOverflow;
+ }
+
+ mem.copy(u8, self.buf[self.pos..buf_size], src);
+ self.pos += src.len;
+ }
+
+ pub fn reset(self: *Buffer) void {
+ self.pos = 0;
+ }
+
+ pub fn slice(self: *Buffer) []const u8 {
+ return self.buf[0..self.pos];
+ }
+};
+
+/// There are many assumptions in the entire codebase that Zig source files can
+/// be byte-indexed with a u32 integer.
+const max_src_size = std.math.maxInt(u32);
+
+var stdout = io.getStdOut().writer();
+
+pub fn main() !void {
+ var arena = heap.ArenaAllocator.init(heap.page_allocator);
+ defer arena.deinit(); // NOTE(mperillo): Can be removed.
+
+ const allocator = arena.allocator();
+
+ var args_it = try process.argsWithAllocator(allocator);
+ _ = args_it.skip(); // it is safe to ignore
+
+ const path = args_it.next() orelse return error.SourceFileRequired;
+ const src = try read(path, allocator);
+
+ var tokenizer = zig.Tokenizer.init(src);
+ var buf: Buffer = Buffer{};
+ while (true) {
+ const token = tokenizer.next();
+ switch (token.tag) {
+ .eof => break,
+ .doc_comment => {
+ const line = blk: {
+ // Strip leading whitespace.
+ const len = token.loc.end - token.loc.start;
+ break :blk if (len == 3) src[token.loc.start + 3 .. token.loc.end] else src[token.loc.start + 4 .. token.loc.end];
+ };
+
+ try buf.append(line);
+ try buf.append("\n");
+ },
+ .keyword_fn => {
+ const doc = buf.slice();
+ buf.reset();
+
+ // Check if doc contains a PEG grammar block, so that normal
+ // doc-comments are ignored.
+ if (mem.indexOf(u8, doc, "<-") != null) {
+ // Separate each doc with an empty line. This in turn will
+ // ensure that rules are separated by an empty line.
+ try stdout.print("{s}\n", .{doc});
+ }
+ },
+ else => {},
+ }
+ }
+}
+
+fn read(path: []const u8, allocator: mem.Allocator) ![:0]const u8 {
+ var f = try fs.cwd().openFile(path, .{ .mode = .read_only });
+ defer f.close();
+
+ const st = try f.stat();
+ if (st.size > max_src_size) return error.FileTooBig;
+
+ const src = try allocator.allocSentinel(u8, @intCast(usize, st.size), 0);
+ const n = try f.readAll(src);
+ if (n != st.size) return error.UnexpectedEndOfFile;
+
+ return src;
+}
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index 2b21731224..83fd12e7bd 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -865,12 +865,14 @@ const blacklisted_symbols = [_][]const u8{
"__ucmpsi2",
"__ucmpti2",
"__udivdi3",
+ "__udivei4",
"__udivmoddi4",
"__udivmodsi4",
"__udivmodti4",
"__udivsi3",
"__udivti3",
"__umoddi3",
+ "__umodei4",
"__umodsi3",
"__umodti3",
"__unorddf2",
diff --git a/tools/stage2_gdb_pretty_printers.py b/tools/stage2_gdb_pretty_printers.py
index 5eca2fdec2..215b27699f 100644
--- a/tools/stage2_gdb_pretty_printers.py
+++ b/tools/stage2_gdb_pretty_printers.py
@@ -2,8 +2,13 @@
# put "source /path/to/stage2_gdb_pretty_printers.py" in ~/.gdbinit to load it automatically.
import re
import gdb.printing
+
+import sys
+from pathlib import Path
+sys.path.insert(0, str(Path(__file__).parent))
import stage2_pretty_printers_common as common
+
class TypePrinter:
def __init__(self, val):
self.val = val
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 92e0757ac7..889e5bcbfe 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -492,6 +492,14 @@ const known_options = [_]KnownOpt{
.name = "compress-debug-sections=",
.ident = "compress_debug_sections",
},
+ .{
+ .name = "install_name",
+ .ident = "install_name",
+ },
+ .{
+ .name = "undefined",
+ .ident = "undefined",
+ },
};
const blacklisted_options = [_][]const u8{};