aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CMakeLists.txt17
-rw-r--r--doc/langref/build.zig6
-rw-r--r--doc/langref/build_c.zig12
-rw-r--r--doc/langref/build_object.zig12
-rw-r--r--lib/compiler/build_runner.zig10
-rw-r--r--lib/compiler/objcopy.zig969
-rw-r--r--lib/compiler/std-docs.zig45
-rw-r--r--lib/compiler/test_runner.zig39
-rw-r--r--lib/compiler_rt.zig4
-rw-r--r--lib/compiler_rt/addo.zig4
-rw-r--r--lib/compiler_rt/addoti4_test.zig3
-rw-r--r--lib/compiler_rt/clear_cache.zig14
-rw-r--r--lib/compiler_rt/cmp.zig1
-rw-r--r--lib/compiler_rt/common.zig7
-rw-r--r--lib/compiler_rt/comparedf2_test.zig1
-rw-r--r--lib/compiler_rt/comparesf2_test.zig1
-rw-r--r--lib/compiler_rt/count0bits.zig1
-rw-r--r--lib/compiler_rt/divdf3.zig1
-rw-r--r--lib/compiler_rt/divmodei4.zig4
-rw-r--r--lib/compiler_rt/fixint_test.zig1
-rw-r--r--lib/compiler_rt/int.zig1
-rw-r--r--lib/compiler_rt/memcpy.zig4
-rw-r--r--lib/compiler_rt/memmove.zig16
-rw-r--r--lib/compiler_rt/mulf3.zig4
-rw-r--r--lib/compiler_rt/rem_pio2_large.zig2
-rw-r--r--lib/compiler_rt/stack_probe.zig1
-rw-r--r--lib/compiler_rt/suboti4_test.zig3
-rw-r--r--lib/compiler_rt/udivmod.zig10
-rw-r--r--lib/compiler_rt/udivmodei4.zig5
-rw-r--r--lib/docs/wasm/main.zig6
-rw-r--r--lib/init/build.zig1
-rw-r--r--lib/std/Build.zig227
-rw-r--r--lib/std/Build/Fuzz/WebServer.zig39
-rw-r--r--lib/std/Build/Step.zig59
-rw-r--r--lib/std/Build/Step/Compile.zig46
-rw-r--r--lib/std/Build/Step/Run.zig97
-rw-r--r--lib/std/Io.zig479
-rw-r--r--lib/std/Io/DeprecatedReader.zig8
-rw-r--r--lib/std/Io/DeprecatedWriter.zig7
-rw-r--r--lib/std/Io/Reader.zig140
-rw-r--r--lib/std/Io/Writer.zig113
-rw-r--r--lib/std/Io/buffered_atomic_file.zig55
-rw-r--r--lib/std/Io/c_writer.zig44
-rw-r--r--lib/std/Io/change_detection_stream.zig55
-rw-r--r--lib/std/Io/find_byte_writer.zig40
-rw-r--r--lib/std/Io/multi_writer.zig53
-rw-r--r--lib/std/Io/stream_source.zig127
-rw-r--r--lib/std/Progress.zig75
-rw-r--r--lib/std/builtin.zig7
-rw-r--r--lib/std/c.zig6
-rw-r--r--lib/std/compress.zig60
-rw-r--r--lib/std/compress/xz.zig49
-rw-r--r--lib/std/compress/xz/block.zig2
-rw-r--r--lib/std/compress/zstandard.zig310
-rw-r--r--lib/std/compress/zstandard/decode/block.zig1149
-rw-r--r--lib/std/compress/zstandard/decode/fse.zig153
-rw-r--r--lib/std/compress/zstandard/decode/huffman.zig234
-rw-r--r--lib/std/compress/zstandard/decompress.zig633
-rw-r--r--lib/std/compress/zstandard/readers.zig82
-rw-r--r--lib/std/compress/zstandard/types.zig403
-rw-r--r--lib/std/compress/zstd.zig152
-rw-r--r--lib/std/compress/zstd/Decompress.zig1840
-rw-r--r--lib/std/crypto/md5.zig12
-rw-r--r--lib/std/elf.zig275
-rw-r--r--lib/std/fs/AtomicFile.zig98
-rw-r--r--lib/std/fs/Dir.zig180
-rw-r--r--lib/std/fs/File.zig317
-rw-r--r--lib/std/fs/test.zig86
-rw-r--r--lib/std/http/Server.zig3
-rw-r--r--lib/std/json.zig1
-rw-r--r--lib/std/math.zig93
-rw-r--r--lib/std/math/big/int_test.zig1
-rw-r--r--lib/std/math/float.zig6
-rw-r--r--lib/std/math/log10.zig1
-rw-r--r--lib/std/mem.zig7
-rw-r--r--lib/std/os/linux.zig1
-rw-r--r--lib/std/posix.zig319
-rw-r--r--lib/std/process/Child.zig81
-rw-r--r--lib/std/start.zig59
-rw-r--r--lib/std/tar.zig612
-rw-r--r--lib/std/tar/Writer.zig462
-rw-r--r--lib/std/tar/test.zig350
-rw-r--r--lib/std/tar/writer.zig497
-rw-r--r--lib/std/testing.zig1
-rw-r--r--lib/std/zig.zig39
-rw-r--r--src/Builtin.zig4
-rw-r--r--src/Compilation.zig315
-rw-r--r--src/InternPool.zig14
-rw-r--r--src/Package/Fetch.zig36
-rw-r--r--src/Package/Fetch/git.zig65
-rw-r--r--src/Package/Module.zig2
-rw-r--r--src/Sema.zig31
-rw-r--r--src/Type.zig2
-rw-r--r--src/Zcu.zig29
-rw-r--r--src/Zcu/PerThread.zig35
-rw-r--r--src/arch/aarch64/bits.zig2063
-rw-r--r--src/arch/riscv64/CodeGen.zig10
-rw-r--r--src/arch/sparc64/CodeGen.zig4
-rw-r--r--src/arch/wasm/CodeGen.zig31
-rw-r--r--src/arch/x86_64/CodeGen.zig108
-rw-r--r--src/arch/x86_64/Emit.zig11
-rw-r--r--src/codegen.zig61
-rw-r--r--src/codegen/aarch64.zig205
-rw-r--r--src/codegen/aarch64/Assemble.zig1682
-rw-r--r--src/codegen/aarch64/Disassemble.zig905
-rw-r--r--src/codegen/aarch64/Mir.zig348
-rw-r--r--src/codegen/aarch64/Select.zig12141
-rw-r--r--src/codegen/aarch64/abi.zig20
-rw-r--r--src/codegen/aarch64/encoding.zig12194
-rw-r--r--src/codegen/aarch64/instructions.zon1543
-rw-r--r--src/codegen/c.zig33
-rw-r--r--src/codegen/llvm.zig17
-rw-r--r--src/codegen/spirv.zig6
-rw-r--r--src/dev.zig41
-rw-r--r--src/fmt.zig4
-rw-r--r--src/link.zig1
-rw-r--r--src/link/Coff.zig76
-rw-r--r--src/link/Dwarf.zig8
-rw-r--r--src/link/Elf/Atom.zig70
-rw-r--r--src/link/Elf/Thunk.zig15
-rw-r--r--src/link/Elf/ZigObject.zig10
-rw-r--r--src/link/Elf/relocation.zig24
-rw-r--r--src/link/Elf/synthetic_sections.zig75
-rw-r--r--src/link/MachO.zig4
-rw-r--r--src/link/MachO/Atom.zig100
-rw-r--r--src/link/MachO/Thunk.zig10
-rw-r--r--src/link/MachO/ZigObject.zig10
-rw-r--r--src/link/MachO/synthetic.zig95
-rw-r--r--src/link/aarch64.zig64
-rw-r--r--src/main.zig151
-rw-r--r--src/target.zig30
-rw-r--r--stage1/wasm2c.c2
-rw-r--r--stage1/zig1.wasmbin2889638 -> 2897241 bytes
-rw-r--r--test/behavior.zig4
-rw-r--r--test/behavior/abs.zig10
-rw-r--r--test/behavior/align.zig20
-rw-r--r--test/behavior/array.zig42
-rw-r--r--test/behavior/asm.zig9
-rw-r--r--test/behavior/atomics.zig25
-rw-r--r--test/behavior/basic.zig22
-rw-r--r--test/behavior/bit_shifting.zig3
-rw-r--r--test/behavior/bitcast.zig21
-rw-r--r--test/behavior/bitreverse.zig8
-rw-r--r--test/behavior/builtin_functions_returning_void_or_noreturn.zig1
-rw-r--r--test/behavior/byteswap.zig75
-rw-r--r--test/behavior/call.zig19
-rw-r--r--test/behavior/cast.zig118
-rw-r--r--test/behavior/cast_int.zig7
-rw-r--r--test/behavior/comptime_memory.zig2
-rw-r--r--test/behavior/const_slice_child.zig1
-rw-r--r--test/behavior/decl_literals.zig1
-rw-r--r--test/behavior/defer.zig8
-rw-r--r--test/behavior/enum.zig29
-rw-r--r--test/behavior/error.zig25
-rw-r--r--test/behavior/eval.zig36
-rw-r--r--test/behavior/export_builtin.zig4
-rw-r--r--test/behavior/field_parent_ptr.zig3
-rw-r--r--test/behavior/floatop.zig86
-rw-r--r--test/behavior/fn.zig13
-rw-r--r--test/behavior/for.zig20
-rw-r--r--test/behavior/generics.zig19
-rw-r--r--test/behavior/globals.zig6
-rw-r--r--test/behavior/if.zig6
-rw-r--r--test/behavior/import_c_keywords.zig1
-rw-r--r--test/behavior/inline_switch.zig9
-rw-r--r--test/behavior/int128.zig5
-rw-r--r--test/behavior/int_comparison_elision.zig1
-rw-r--r--test/behavior/ir_block_deps.zig1
-rw-r--r--test/behavior/lower_strlit_to_vector.zig1
-rw-r--r--test/behavior/math.zig72
-rw-r--r--test/behavior/maximum_minimum.zig22
-rw-r--r--test/behavior/member_func.zig2
-rw-r--r--test/behavior/memcpy.zig5
-rw-r--r--test/behavior/memmove.zig3
-rw-r--r--test/behavior/memset.zig7
-rw-r--r--test/behavior/muladd.zig14
-rw-r--r--test/behavior/multiple_externs_with_conflicting_types.zig1
-rw-r--r--test/behavior/nan.zig1
-rw-r--r--test/behavior/null.zig6
-rw-r--r--test/behavior/optional.zig20
-rw-r--r--test/behavior/packed-struct.zig46
-rw-r--r--test/behavior/packed-union.zig6
-rw-r--r--test/behavior/packed_struct_explicit_backing_int.zig1
-rw-r--r--test/behavior/pointers.zig14
-rw-r--r--test/behavior/popcount.zig5
-rw-r--r--test/behavior/ptrcast.zig29
-rw-r--r--test/behavior/ptrfromint.zig3
-rw-r--r--test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig1
-rw-r--r--test/behavior/reflection.zig1
-rw-r--r--test/behavior/return_address.zig1
-rw-r--r--test/behavior/saturating_arithmetic.zig17
-rw-r--r--test/behavior/select.zig5
-rw-r--r--test/behavior/shuffle.zig8
-rw-r--r--test/behavior/sizeof_and_typeof.zig3
-rw-r--r--test/behavior/slice.zig24
-rw-r--r--test/behavior/src.zig1
-rw-r--r--test/behavior/string_literals.zig6
-rw-r--r--test/behavior/struct.zig59
-rw-r--r--test/behavior/struct_contains_null_ptr_itself.zig1
-rw-r--r--test/behavior/struct_contains_slice_of_itself.zig3
-rw-r--r--test/behavior/switch.zig38
-rw-r--r--test/behavior/switch_loop.zig41
-rw-r--r--test/behavior/switch_on_captured_error.zig1
-rw-r--r--test/behavior/switch_prong_err_enum.zig2
-rw-r--r--test/behavior/switch_prong_implicit_cast.zig2
-rw-r--r--test/behavior/this.zig2
-rw-r--r--test/behavior/threadlocal.zig3
-rw-r--r--test/behavior/truncate.zig2
-rw-r--r--test/behavior/try.zig82
-rw-r--r--test/behavior/tuple.zig22
-rw-r--r--test/behavior/tuple_declarations.zig2
-rw-r--r--test/behavior/type.zig9
-rw-r--r--test/behavior/type_info.zig6
-rw-r--r--test/behavior/typename.zig8
-rw-r--r--test/behavior/undefined.zig4
-rw-r--r--test/behavior/union.zig104
-rw-r--r--test/behavior/union_with_members.zig1
-rw-r--r--test/behavior/var_args.zig18
-rw-r--r--test/behavior/vector.zig98
-rw-r--r--test/behavior/void.zig1
-rw-r--r--test/behavior/while.zig9
-rw-r--r--test/behavior/widening.zig5
-rw-r--r--test/c_import/macros.zig12
-rw-r--r--test/cases/array_in_anon_struct.zig2
-rw-r--r--test/cases/compile_errors/callconv_interrupt_on_unsupported_platform.zig4
-rw-r--r--test/cases/compile_errors/error_set_membership.zig2
-rw-r--r--test/cases/compile_errors/function_ptr_alignment.zig2
-rw-r--r--test/cases/compile_errors/issue_15572_break_on_inline_while.zig2
-rw-r--r--test/cases/compile_errors/switch_on_non_err_union.zig2
-rw-r--r--test/cases/pic_freestanding.zig2
-rw-r--r--test/cases/safety/@alignCast misaligned.zig2
-rw-r--r--test/cases/safety/@enumFromInt - no matching tag value.zig2
-rw-r--r--test/cases/safety/@enumFromInt truncated bits - exhaustive.zig2
-rw-r--r--test/cases/safety/@enumFromInt truncated bits - nonexhaustive.zig2
-rw-r--r--test/cases/safety/@errorCast error not present in destination.zig2
-rw-r--r--test/cases/safety/@errorCast error union casted to disjoint set.zig2
-rw-r--r--test/cases/safety/@intCast to u0.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - i0 max.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - i0 min.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - signed max.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - signed min.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - u0 max.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - u0 min.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned max.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned min.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - vector max.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - boundary case - vector min.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - negative out of range.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig2
-rw-r--r--test/cases/safety/@intFromFloat cannot fit - positive out of range.zig2
-rw-r--r--test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig2
-rw-r--r--test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig2
-rw-r--r--test/cases/safety/@ptrFromInt with misaligned address.zig2
-rw-r--r--test/cases/safety/@tagName on corrupted enum value.zig2
-rw-r--r--test/cases/safety/@tagName on corrupted union value.zig2
-rw-r--r--test/cases/safety/array slice sentinel mismatch vector.zig2
-rw-r--r--test/cases/safety/array slice sentinel mismatch.zig2
-rw-r--r--test/cases/safety/bad union field access.zig2
-rw-r--r--test/cases/safety/calling panic.zig2
-rw-r--r--test/cases/safety/cast []u8 to bigger slice of wrong size.zig2
-rw-r--r--test/cases/safety/cast integer to global error and no code matches.zig2
-rw-r--r--test/cases/safety/empty slice with sentinel out of bounds.zig2
-rw-r--r--test/cases/safety/exact division failure - vectors.zig2
-rw-r--r--test/cases/safety/exact division failure.zig2
-rw-r--r--test/cases/safety/for_len_mismatch.zig2
-rw-r--r--test/cases/safety/for_len_mismatch_three.zig2
-rw-r--r--test/cases/safety/ignored expression integer overflow.zig2
-rw-r--r--test/cases/safety/integer addition overflow.zig2
-rw-r--r--test/cases/safety/integer division by zero - vectors.zig2
-rw-r--r--test/cases/safety/integer division by zero.zig2
-rw-r--r--test/cases/safety/integer multiplication overflow.zig2
-rw-r--r--test/cases/safety/integer negation overflow.zig2
-rw-r--r--test/cases/safety/integer subtraction overflow.zig2
-rw-r--r--test/cases/safety/memcpy_alias.zig2
-rw-r--r--test/cases/safety/memcpy_len_mismatch.zig2
-rw-r--r--test/cases/safety/memmove_len_mismatch.zig2
-rw-r--r--test/cases/safety/memset_array_undefined_bytes.zig2
-rw-r--r--test/cases/safety/memset_array_undefined_large.zig2
-rw-r--r--test/cases/safety/memset_slice_undefined_bytes.zig2
-rw-r--r--test/cases/safety/memset_slice_undefined_large.zig2
-rw-r--r--test/cases/safety/modrem by zero.zig2
-rw-r--r--test/cases/safety/modulus by zero.zig2
-rw-r--r--test/cases/safety/noreturn returned.zig2
-rw-r--r--test/cases/safety/optional unwrap operator on C pointer.zig2
-rw-r--r--test/cases/safety/optional unwrap operator on null pointer.zig2
-rw-r--r--test/cases/safety/optional_empty_error_set.zig2
-rw-r--r--test/cases/safety/out of bounds array slice by length.zig2
-rw-r--r--test/cases/safety/out of bounds slice access.zig2
-rw-r--r--test/cases/safety/pointer casting null to non-optional pointer.zig2
-rw-r--r--test/cases/safety/pointer casting to null function pointer.zig2
-rw-r--r--test/cases/safety/pointer slice sentinel mismatch.zig2
-rw-r--r--test/cases/safety/remainder division by zero.zig2
-rw-r--r--test/cases/safety/shift left by huge amount.zig2
-rw-r--r--test/cases/safety/shift right by huge amount.zig2
-rw-r--r--test/cases/safety/signed integer division overflow - vectors.zig2
-rw-r--r--test/cases/safety/signed integer division overflow.zig2
-rw-r--r--test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig2
-rw-r--r--test/cases/safety/signed integer not fitting in cast to unsigned integer.zig2
-rw-r--r--test/cases/safety/signed shift left overflow.zig2
-rw-r--r--test/cases/safety/signed shift right overflow.zig2
-rw-r--r--test/cases/safety/signed-unsigned vector cast.zig2
-rw-r--r--test/cases/safety/slice by length sentinel mismatch on lhs.zig2
-rw-r--r--test/cases/safety/slice by length sentinel mismatch on rhs.zig2
-rw-r--r--test/cases/safety/slice sentinel mismatch - floats.zig2
-rw-r--r--test/cases/safety/slice sentinel mismatch - optional pointers.zig2
-rw-r--r--test/cases/safety/slice slice sentinel mismatch.zig2
-rw-r--r--test/cases/safety/slice start index greater than end index.zig2
-rw-r--r--test/cases/safety/slice with sentinel out of bounds - runtime len.zig2
-rw-r--r--test/cases/safety/slice with sentinel out of bounds.zig2
-rw-r--r--test/cases/safety/slice_cast_change_len_0.zig2
-rw-r--r--test/cases/safety/slice_cast_change_len_1.zig2
-rw-r--r--test/cases/safety/slice_cast_change_len_2.zig2
-rw-r--r--test/cases/safety/slicing null C pointer - runtime len.zig2
-rw-r--r--test/cases/safety/slicing null C pointer.zig2
-rw-r--r--test/cases/safety/switch else on corrupt enum value - one prong.zig2
-rw-r--r--test/cases/safety/switch else on corrupt enum value - union.zig2
-rw-r--r--test/cases/safety/switch else on corrupt enum value.zig2
-rw-r--r--test/cases/safety/switch on corrupted enum value.zig2
-rw-r--r--test/cases/safety/switch on corrupted union value.zig2
-rw-r--r--test/cases/safety/truncating vector cast.zig2
-rw-r--r--test/cases/safety/unreachable.zig2
-rw-r--r--test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig2
-rw-r--r--test/cases/safety/unsigned shift left overflow.zig2
-rw-r--r--test/cases/safety/unsigned shift right overflow.zig2
-rw-r--r--test/cases/safety/unsigned-signed vector cast.zig2
-rw-r--r--test/cases/safety/unwrap error switch.zig2
-rw-r--r--test/cases/safety/unwrap error.zig2
-rw-r--r--test/cases/safety/value does not fit in shortening cast - u0.zig2
-rw-r--r--test/cases/safety/value does not fit in shortening cast.zig2
-rw-r--r--test/cases/safety/vector integer addition overflow.zig2
-rw-r--r--test/cases/safety/vector integer multiplication overflow.zig2
-rw-r--r--test/cases/safety/vector integer negation overflow.zig2
-rw-r--r--test/cases/safety/vector integer subtraction overflow.zig2
-rw-r--r--test/cases/safety/zero casted to error.zig2
-rw-r--r--test/cases/taking_pointer_of_global_tagged_union.zig2
-rw-r--r--test/incremental/fix_many_errors71
-rw-r--r--test/link/build.zig.zon3
-rw-r--r--test/link/elf.zig738
-rw-r--r--test/link/link.zig6
-rw-r--r--test/link/macho.zig246
-rw-r--r--test/link/wasm/extern/build.zig2
-rw-r--r--test/src/Cases.zig10
-rw-r--r--test/src/RunTranslatedC.zig2
-rw-r--r--test/standalone/build.zig.zon5
-rw-r--r--test/standalone/c_embed_path/build.zig6
-rw-r--r--test/standalone/dependencyFromBuildZig/build.zig.zon3
-rw-r--r--test/standalone/dependencyFromBuildZig/other/build.zig.zon3
-rw-r--r--test/standalone/dependency_options/build.zig148
-rw-r--r--test/standalone/dependency_options/build.zig.zon11
-rw-r--r--test/standalone/dependency_options/other/build.zig59
-rw-r--r--test/standalone/dependency_options/other/build.zig.zon7
-rw-r--r--test/standalone/extern/build.zig4
-rw-r--r--test/standalone/issue_794/build.zig2
-rw-r--r--test/standalone/stack_iterator/build.zig127
-rw-r--r--test/tests.zig73
-rw-r--r--tools/docgen.zig1
-rw-r--r--tools/gen_stubs.zig3
-rw-r--r--tools/incr-check.zig63
358 files changed, 35966 insertions, 11528 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d9824e5c12..4a3dc27a48 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -390,15 +390,6 @@ set(ZIG_STAGE2_SOURCES
lib/std/Io.zig
lib/std/Io/Reader.zig
lib/std/Io/Writer.zig
- lib/std/Io/buffered_atomic_file.zig
- lib/std/Io/buffered_writer.zig
- lib/std/Io/change_detection_stream.zig
- lib/std/Io/counting_reader.zig
- lib/std/Io/counting_writer.zig
- lib/std/Io/find_byte_writer.zig
- lib/std/Io/fixed_buffer_stream.zig
- lib/std/Io/limited_reader.zig
- lib/std/Io/seekable_stream.zig
lib/std/Progress.zig
lib/std/Random.zig
lib/std/Target.zig
@@ -550,6 +541,14 @@ set(ZIG_STAGE2_SOURCES
src/clang_options.zig
src/clang_options_data.zig
src/codegen.zig
+ src/codegen/aarch64.zig
+ src/codegen/aarch64/abi.zig
+ src/codegen/aarch64/Assemble.zig
+ src/codegen/aarch64/Disassemble.zig
+ src/codegen/aarch64/encoding.zig
+ src/codegen/aarch64/instructions.zon
+ src/codegen/aarch64/Mir.zig
+ src/codegen/aarch64/Select.zig
src/codegen/c.zig
src/codegen/c/Type.zig
src/codegen/llvm.zig
diff --git a/doc/langref/build.zig b/doc/langref/build.zig
index ca729b5b93..19e4b57c08 100644
--- a/doc/langref/build.zig
+++ b/doc/langref/build.zig
@@ -4,8 +4,10 @@ pub fn build(b: *std.Build) void {
const optimize = b.standardOptimizeOption(.{});
const exe = b.addExecutable(.{
.name = "example",
- .root_source_file = b.path("example.zig"),
- .optimize = optimize,
+ .root_module = b.createModule(.{
+ .root_source_file = b.path("example.zig"),
+ .optimize = optimize,
+ }),
});
b.default_step.dependOn(&exe.step);
}
diff --git a/doc/langref/build_c.zig b/doc/langref/build_c.zig
index 08f1683e9f..dc8e5553fc 100644
--- a/doc/langref/build_c.zig
+++ b/doc/langref/build_c.zig
@@ -4,15 +4,19 @@ pub fn build(b: *std.Build) void {
const lib = b.addLibrary(.{
.linkage = .dynamic,
.name = "mathtest",
- .root_source_file = b.path("mathtest.zig"),
+ .root_module = b.createModule(.{
+ .root_source_file = b.path("mathtest.zig"),
+ }),
.version = .{ .major = 1, .minor = 0, .patch = 0 },
});
const exe = b.addExecutable(.{
.name = "test",
+ .root_module = b.createModule(.{
+ .link_libc = true,
+ }),
});
- exe.addCSourceFile(.{ .file = b.path("test.c"), .flags = &.{"-std=c99"} });
- exe.linkLibrary(lib);
- exe.linkSystemLibrary("c");
+ exe.root_module.addCSourceFile(.{ .file = b.path("test.c"), .flags = &.{"-std=c99"} });
+ exe.root_module.linkLibrary(lib);
b.default_step.dependOn(&exe.step);
diff --git a/doc/langref/build_object.zig b/doc/langref/build_object.zig
index c08644b0d6..c9a3588d9b 100644
--- a/doc/langref/build_object.zig
+++ b/doc/langref/build_object.zig
@@ -3,15 +3,19 @@ const std = @import("std");
pub fn build(b: *std.Build) void {
const obj = b.addObject(.{
.name = "base64",
- .root_source_file = b.path("base64.zig"),
+ .root_module = b.createModule(.{
+ .root_source_file = b.path("base64.zig"),
+ }),
});
const exe = b.addExecutable(.{
.name = "test",
+ .root_module = b.createModule(.{
+ .link_libc = true,
+ }),
});
- exe.addCSourceFile(.{ .file = b.path("test.c"), .flags = &.{"-std=c99"} });
- exe.addObject(obj);
- exe.linkSystemLibrary("c");
+ exe.root_module.addCSourceFile(.{ .file = b.path("test.c"), .flags = &.{"-std=c99"} });
+ exe.root_module.addObject(obj);
b.installArtifact(exe);
}
diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig
index 693e9b4c70..6b7266ee71 100644
--- a/lib/compiler/build_runner.zig
+++ b/lib/compiler/build_runner.zig
@@ -696,8 +696,11 @@ fn runStepNames(
.failures, .none => true,
else => false,
};
- if (failure_count == 0 and failures_only) {
- return run.cleanExit();
+ if (failure_count == 0) {
+ std.Progress.setStatus(.success);
+ if (failures_only) return run.cleanExit();
+ } else {
+ std.Progress.setStatus(.failure);
}
const ttyconf = run.ttyconf;
@@ -708,7 +711,7 @@ fn runStepNames(
const total_count = success_count + failure_count + pending_count + skipped_count;
ttyconf.setColor(w, .cyan) catch {};
- w.writeAll("Build Summary:") catch {};
+ w.writeAll("\nBuild Summary:") catch {};
ttyconf.setColor(w, .reset) catch {};
w.print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
if (skipped_count > 0) w.print("; {d} skipped", .{skipped_count}) catch {};
@@ -1149,6 +1152,7 @@ fn workerMakeOneStep(
} else |err| switch (err) {
error.MakeFailed => {
@atomicStore(Step.State, &s.state, .failure, .seq_cst);
+ std.Progress.setStatus(.failure_working);
break :handle_result;
},
error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst),
diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig
index 52ffe208f6..5908f8b73d 100644
--- a/lib/compiler/objcopy.zig
+++ b/lib/compiler/objcopy.zig
@@ -13,6 +13,9 @@ const Server = std.zig.Server;
var stdin_buffer: [1024]u8 = undefined;
var stdout_buffer: [1024]u8 = undefined;
+var input_buffer: [1024]u8 = undefined;
+var output_buffer: [1024]u8 = undefined;
+
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
@@ -145,13 +148,16 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
const input = opt_input orelse fatal("expected input parameter", .{});
const output = opt_output orelse fatal("expected output parameter", .{});
- var in_file = fs.cwd().openFile(input, .{}) catch |err|
- fatal("unable to open '{s}': {s}", .{ input, @errorName(err) });
- defer in_file.close();
+ const input_file = fs.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err });
+ defer input_file.close();
+
+ const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err });
- const elf_hdr = std.elf.Header.read(in_file) catch |err| switch (err) {
- error.InvalidElfMagic => fatal("not an ELF file: '{s}'", .{input}),
- else => fatal("unable to read '{s}': {s}", .{ input, @errorName(err) }),
+ var in: File.Reader = .initSize(input_file, &input_buffer, stat.size);
+
+ const elf_hdr = std.elf.Header.read(&in.interface) catch |err| switch (err) {
+ error.ReadFailed => fatal("unable to read {s}: {t}", .{ input, in.err.? }),
+ else => |e| fatal("invalid elf file: {t}", .{e}),
};
const in_ofmt = .elf;
@@ -168,16 +174,12 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
}
};
- const mode = mode: {
- if (out_fmt != .elf or only_keep_debug)
- break :mode fs.File.default_mode;
- if (in_file.stat()) |stat|
- break :mode stat.mode
- else |_|
- break :mode fs.File.default_mode;
- };
- var out_file = try fs.cwd().createFile(output, .{ .mode = mode });
- defer out_file.close();
+ const mode = if (out_fmt != .elf or only_keep_debug) fs.File.default_mode else stat.mode;
+
+ var output_file = try fs.cwd().createFile(output, .{ .mode = mode });
+ defer output_file.close();
+
+ var out = output_file.writer(&output_buffer);
switch (out_fmt) {
.hex, .raw => {
@@ -192,7 +194,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
if (set_section_flags != null)
fatal("zig objcopy: ELF to RAW or HEX copying does not support --set_section_flags", .{});
- try emitElf(arena, in_file, out_file, elf_hdr, .{
+ try emitElf(arena, &in, &out, elf_hdr, .{
.ofmt = out_fmt,
.only_section = only_section,
.pad_to = pad_to,
@@ -208,22 +210,13 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
if (pad_to) |_|
fatal("zig objcopy: ELF to ELF copying does not support --pad-to", .{});
- try stripElf(arena, in_file, out_file, elf_hdr, .{
- .strip_debug = strip_debug,
- .strip_all = strip_all,
- .only_keep_debug = only_keep_debug,
- .add_debuglink = opt_add_debuglink,
- .extract_to = opt_extract,
- .compress_debug = compress_debug_sections,
- .add_section = add_section,
- .set_section_alignment = set_section_alignment,
- .set_section_flags = set_section_flags,
- });
- return std.process.cleanExit();
+ fatal("unimplemented", .{});
},
else => fatal("unsupported output object format: {s}", .{@tagName(out_fmt)}),
}
+ try out.end();
+
if (listen) {
var stdin_reader = fs.File.stdin().reader(&stdin_buffer);
var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
@@ -304,12 +297,12 @@ const SetSectionFlags = struct {
fn emitElf(
arena: Allocator,
- in_file: File,
- out_file: File,
+ in: *File.Reader,
+ out: *File.Writer,
elf_hdr: elf.Header,
options: EmitRawElfOptions,
) !void {
- var binary_elf_output = try BinaryElfOutput.parse(arena, in_file, elf_hdr);
+ var binary_elf_output = try BinaryElfOutput.parse(arena, in, elf_hdr);
defer binary_elf_output.deinit();
if (options.ofmt == .elf) {
@@ -328,8 +321,8 @@ fn emitElf(
continue;
}
- try writeBinaryElfSection(in_file, out_file, section);
- try padFile(out_file, options.pad_to);
+ try writeBinaryElfSection(in, out, section);
+ try padFile(out, options.pad_to);
return;
}
},
@@ -342,10 +335,10 @@ fn emitElf(
switch (options.ofmt) {
.raw => {
for (binary_elf_output.sections.items) |section| {
- try out_file.seekTo(section.binaryOffset);
- try writeBinaryElfSection(in_file, out_file, section);
+ try out.seekTo(section.binaryOffset);
+ try writeBinaryElfSection(in, out, section);
}
- try padFile(out_file, options.pad_to);
+ try padFile(out, options.pad_to);
},
.hex => {
if (binary_elf_output.segments.items.len == 0) return;
@@ -353,15 +346,15 @@ fn emitElf(
return error.InvalidHexfileAddressRange;
}
- var hex_writer = HexWriter{ .out_file = out_file };
+ var hex_writer = HexWriter{ .out = out };
for (binary_elf_output.segments.items) |segment| {
- try hex_writer.writeSegment(segment, in_file);
+ try hex_writer.writeSegment(segment, in);
}
if (options.pad_to) |_| {
// Padding to a size in hex files isn't applicable
return error.InvalidArgument;
}
- try hex_writer.writeEOF();
+ try hex_writer.writeEof();
},
else => unreachable,
}
@@ -399,7 +392,7 @@ const BinaryElfOutput = struct {
self.segments.deinit(self.allocator);
}
- pub fn parse(allocator: Allocator, elf_file: File, elf_hdr: elf.Header) !Self {
+ pub fn parse(allocator: Allocator, in: *File.Reader, elf_hdr: elf.Header) !Self {
var self: Self = .{
.segments = .{},
.sections = .{},
@@ -412,7 +405,7 @@ const BinaryElfOutput = struct {
self.shstrtab = blk: {
if (elf_hdr.shstrndx >= elf_hdr.shnum) break :blk null;
- var section_headers = elf_hdr.section_header_iterator(&elf_file);
+ var section_headers = elf_hdr.iterateSectionHeaders(in);
var section_counter: usize = 0;
while (section_counter < elf_hdr.shstrndx) : (section_counter += 1) {
@@ -421,18 +414,13 @@ const BinaryElfOutput = struct {
const shstrtab_shdr = (try section_headers.next()).?;
- const buffer = try allocator.alloc(u8, @intCast(shstrtab_shdr.sh_size));
- errdefer allocator.free(buffer);
-
- const num_read = try elf_file.preadAll(buffer, shstrtab_shdr.sh_offset);
- if (num_read != buffer.len) return error.EndOfStream;
-
- break :blk buffer;
+ try in.seekTo(shstrtab_shdr.sh_offset);
+ break :blk try in.interface.readAlloc(allocator, shstrtab_shdr.sh_size);
};
errdefer if (self.shstrtab) |shstrtab| allocator.free(shstrtab);
- var section_headers = elf_hdr.section_header_iterator(&elf_file);
+ var section_headers = elf_hdr.iterateSectionHeaders(in);
while (try section_headers.next()) |section| {
if (sectionValidForOutput(section)) {
const newSection = try allocator.create(BinaryElfSection);
@@ -451,7 +439,7 @@ const BinaryElfOutput = struct {
}
}
- var program_headers = elf_hdr.program_header_iterator(&elf_file);
+ var program_headers = elf_hdr.iterateProgramHeaders(in);
while (try program_headers.next()) |phdr| {
if (phdr.p_type == elf.PT_LOAD) {
const newSegment = try allocator.create(BinaryElfSegment);
@@ -539,19 +527,17 @@ const BinaryElfOutput = struct {
}
};
-fn writeBinaryElfSection(elf_file: File, out_file: File, section: *BinaryElfSection) !void {
- try out_file.writeFileAll(elf_file, .{
- .in_offset = section.elfOffset,
- .in_len = section.fileSize,
- });
+fn writeBinaryElfSection(in: *File.Reader, out: *File.Writer, section: *BinaryElfSection) !void {
+ try in.seekTo(section.elfOffset);
+ _ = try out.interface.sendFileAll(in, .limited(section.fileSize));
}
const HexWriter = struct {
prev_addr: ?u32 = null,
- out_file: File,
+ out: *File.Writer,
/// Max data bytes per line of output
- const MAX_PAYLOAD_LEN: u8 = 16;
+ const max_payload_len: u8 = 16;
fn addressParts(address: u16) [2]u8 {
const msb: u8 = @truncate(address >> 8);
@@ -627,13 +613,13 @@ const HexWriter = struct {
return (sum ^ 0xFF) +% 1;
}
- fn write(self: Record, file: File) File.WriteError!void {
+ fn write(self: Record, out: *File.Writer) !void {
const linesep = "\r\n";
// colon, (length, address, type, payload, checksum) as hex, CRLF
- const BUFSIZE = 1 + (1 + 2 + 1 + MAX_PAYLOAD_LEN + 1) * 2 + linesep.len;
+ const BUFSIZE = 1 + (1 + 2 + 1 + max_payload_len + 1) * 2 + linesep.len;
var outbuf: [BUFSIZE]u8 = undefined;
const payload_bytes = self.getPayloadBytes();
- assert(payload_bytes.len <= MAX_PAYLOAD_LEN);
+ assert(payload_bytes.len <= max_payload_len);
const line = try std.fmt.bufPrint(&outbuf, ":{0X:0>2}{1X:0>4}{2X:0>2}{3X}{4X:0>2}" ++ linesep, .{
@as(u8, @intCast(payload_bytes.len)),
@@ -642,38 +628,37 @@ const HexWriter = struct {
payload_bytes,
self.checksum(),
});
- try file.writeAll(line);
+ try out.interface.writeAll(line);
}
};
- pub fn writeSegment(self: *HexWriter, segment: *const BinaryElfSegment, elf_file: File) !void {
- var buf: [MAX_PAYLOAD_LEN]u8 = undefined;
+ pub fn writeSegment(self: *HexWriter, segment: *const BinaryElfSegment, in: *File.Reader) !void {
+ var buf: [max_payload_len]u8 = undefined;
var bytes_read: usize = 0;
while (bytes_read < segment.fileSize) {
const row_address: u32 = @intCast(segment.physicalAddress + bytes_read);
const remaining = segment.fileSize - bytes_read;
- const to_read: usize = @intCast(@min(remaining, MAX_PAYLOAD_LEN));
- const did_read = try elf_file.preadAll(buf[0..to_read], segment.elfOffset + bytes_read);
- if (did_read < to_read) return error.UnexpectedEOF;
+ const dest = buf[0..@min(remaining, max_payload_len)];
+ try in.seekTo(segment.elfOffset + bytes_read);
+ try in.interface.readSliceAll(dest);
+ try self.writeDataRow(row_address, dest);
- try self.writeDataRow(row_address, buf[0..did_read]);
-
- bytes_read += did_read;
+ bytes_read += dest.len;
}
}
- fn writeDataRow(self: *HexWriter, address: u32, data: []const u8) File.WriteError!void {
+ fn writeDataRow(self: *HexWriter, address: u32, data: []const u8) !void {
const record = Record.Data(address, data);
if (address > 0xFFFF and (self.prev_addr == null or record.address != self.prev_addr.?)) {
- try Record.Address(address).write(self.out_file);
+ try Record.Address(address).write(self.out);
}
- try record.write(self.out_file);
+ try record.write(self.out);
self.prev_addr = @intCast(record.address + data.len);
}
- fn writeEOF(self: HexWriter) File.WriteError!void {
- try Record.EOF().write(self.out_file);
+ fn writeEof(self: HexWriter) !void {
+ try Record.EOF().write(self.out);
}
};
@@ -686,9 +671,9 @@ fn containsValidAddressRange(segments: []*BinaryElfSegment) bool {
return true;
}
-fn padFile(f: File, opt_size: ?u64) !void {
+fn padFile(out: *File.Writer, opt_size: ?u64) !void {
const size = opt_size orelse return;
- try f.setEndPos(size);
+ try out.file.setEndPos(size);
}
test "HexWriter.Record.Address has correct payload and checksum" {
@@ -732,836 +717,6 @@ test "containsValidAddressRange" {
try std.testing.expect(containsValidAddressRange(&buf));
}
-// -------------
-// ELF to ELF stripping
-
-const StripElfOptions = struct {
- extract_to: ?[]const u8 = null,
- add_debuglink: ?[]const u8 = null,
- strip_all: bool = false,
- strip_debug: bool = false,
- only_keep_debug: bool = false,
- compress_debug: bool = false,
- add_section: ?AddSection,
- set_section_alignment: ?SetSectionAlignment,
- set_section_flags: ?SetSectionFlags,
-};
-
-fn stripElf(
- allocator: Allocator,
- in_file: File,
- out_file: File,
- elf_hdr: elf.Header,
- options: StripElfOptions,
-) !void {
- const Filter = ElfFileHelper.Filter;
- const DebugLink = ElfFileHelper.DebugLink;
-
- const filter: Filter = filter: {
- if (options.only_keep_debug) break :filter .debug;
- if (options.strip_all) break :filter .program;
- if (options.strip_debug) break :filter .program_and_symbols;
- break :filter .all;
- };
-
- const filter_complement: ?Filter = blk: {
- if (options.extract_to) |_| {
- break :blk switch (filter) {
- .program => .debug_and_symbols,
- .debug => .program_and_symbols,
- .program_and_symbols => .debug,
- .debug_and_symbols => .program,
- .all => fatal("zig objcopy: nothing to extract", .{}),
- };
- } else {
- break :blk null;
- }
- };
- const debuglink_path = path: {
- if (options.add_debuglink) |path| break :path path;
- if (options.extract_to) |path| break :path path;
- break :path null;
- };
-
- switch (elf_hdr.is_64) {
- inline else => |is_64| {
- var elf_file = try ElfFile(is_64).parse(allocator, in_file, elf_hdr);
- defer elf_file.deinit();
-
- if (options.add_section) |user_section| {
- for (elf_file.sections) |section| {
- if (std.mem.eql(u8, section.name, user_section.section_name)) {
- fatal("zig objcopy: unable to add section '{s}'. Section already exists in input", .{user_section.section_name});
- }
- }
- }
-
- if (filter_complement) |flt| {
- // write the .dbg file and close it, so it can be read back to compute the debuglink checksum.
- const path = options.extract_to.?;
- const dbg_file = std.fs.cwd().createFile(path, .{}) catch |err| {
- fatal("zig objcopy: unable to create '{s}': {s}", .{ path, @errorName(err) });
- };
- defer dbg_file.close();
-
- try elf_file.emit(allocator, dbg_file, in_file, .{ .section_filter = flt, .compress_debug = options.compress_debug });
- }
-
- const debuglink: ?DebugLink = if (debuglink_path) |path| ElfFileHelper.createDebugLink(path) else null;
- try elf_file.emit(allocator, out_file, in_file, .{
- .section_filter = filter,
- .debuglink = debuglink,
- .compress_debug = options.compress_debug,
- .add_section = options.add_section,
- .set_section_alignment = options.set_section_alignment,
- .set_section_flags = options.set_section_flags,
- });
- },
- }
-}
-
-// note: this is "a minimal effort implementation"
-// It doesn't support all possibile elf files: some sections type may need fixups, the program header may need fix up, ...
-// It was written for a specific use case (strip debug info to a sperate file, for linux 64-bits executables built with `zig` or `zig c++` )
-// It moves and reoders the sections as little as possible to avoid having to do fixups.
-// TODO: support non-native endianess
-
-fn ElfFile(comptime is_64: bool) type {
- const Elf_Ehdr = if (is_64) elf.Elf64_Ehdr else elf.Elf32_Ehdr;
- const Elf_Phdr = if (is_64) elf.Elf64_Phdr else elf.Elf32_Phdr;
- const Elf_Shdr = if (is_64) elf.Elf64_Shdr else elf.Elf32_Shdr;
- const Elf_Chdr = if (is_64) elf.Elf64_Chdr else elf.Elf32_Chdr;
- const Elf_Sym = if (is_64) elf.Elf64_Sym else elf.Elf32_Sym;
- const Elf_OffSize = if (is_64) elf.Elf64_Off else elf.Elf32_Off;
-
- return struct {
- raw_elf_header: Elf_Ehdr,
- program_segments: []const Elf_Phdr,
- sections: []const Section,
- arena: std.heap.ArenaAllocator,
-
- const SectionCategory = ElfFileHelper.SectionCategory;
- const section_memory_align: std.mem.Alignment = .of(Elf_Sym); // most restrictive of what we may load in memory
- const Section = struct {
- section: Elf_Shdr,
- name: []const u8 = "",
- segment: ?*const Elf_Phdr = null, // if the section is used by a program segment (there can be more than one)
- payload: ?[]align(section_memory_align.toByteUnits()) const u8 = null, // if we need the data in memory
- category: SectionCategory = .none, // should the section be kept in the exe or stripped to the debug database, or both.
- };
-
- const Self = @This();
-
- pub fn parse(gpa: Allocator, in_file: File, header: elf.Header) !Self {
- var arena = std.heap.ArenaAllocator.init(gpa);
- errdefer arena.deinit();
- const allocator = arena.allocator();
-
- var raw_header: Elf_Ehdr = undefined;
- {
- const bytes_read = try in_file.preadAll(std.mem.asBytes(&raw_header), 0);
- if (bytes_read < @sizeOf(Elf_Ehdr))
- return error.TRUNCATED_ELF;
- }
-
- // program header: list of segments
- const program_segments = blk: {
- if (@sizeOf(Elf_Phdr) != header.phentsize)
- fatal("zig objcopy: unsupported ELF file, unexpected phentsize ({d})", .{header.phentsize});
-
- const program_header = try allocator.alloc(Elf_Phdr, header.phnum);
- const bytes_read = try in_file.preadAll(std.mem.sliceAsBytes(program_header), header.phoff);
- if (bytes_read < @sizeOf(Elf_Phdr) * header.phnum)
- return error.TRUNCATED_ELF;
- break :blk program_header;
- };
-
- // section header
- const sections = blk: {
- if (@sizeOf(Elf_Shdr) != header.shentsize)
- fatal("zig objcopy: unsupported ELF file, unexpected shentsize ({d})", .{header.shentsize});
-
- const section_header = try allocator.alloc(Section, header.shnum);
-
- const raw_section_header = try allocator.alloc(Elf_Shdr, header.shnum);
- defer allocator.free(raw_section_header);
- const bytes_read = try in_file.preadAll(std.mem.sliceAsBytes(raw_section_header), header.shoff);
- if (bytes_read < @sizeOf(Elf_Phdr) * header.shnum)
- return error.TRUNCATED_ELF;
-
- for (section_header, raw_section_header) |*section, hdr| {
- section.* = .{ .section = hdr };
- }
- break :blk section_header;
- };
-
- // load data to memory for some sections:
- // string tables for access
- // sections than need modifications when other sections move.
- for (sections, 0..) |*section, idx| {
- const need_data = switch (section.section.sh_type) {
- elf.DT_VERSYM => true,
- elf.SHT_SYMTAB, elf.SHT_DYNSYM => true,
- else => false,
- };
- const need_strings = (idx == header.shstrndx);
-
- if (need_data or need_strings) {
- const buffer = try allocator.alignedAlloc(u8, section_memory_align, @intCast(section.section.sh_size));
- const bytes_read = try in_file.preadAll(buffer, section.section.sh_offset);
- if (bytes_read != section.section.sh_size) return error.TRUNCATED_ELF;
- section.payload = buffer;
- }
- }
-
- // fill-in sections info:
- // resolve the name
- // find if a program segment uses the section
- // categorize sections usage (used by program segments, debug datadase, common metadata, symbol table)
- for (sections) |*section| {
- section.segment = for (program_segments) |*seg| {
- if (sectionWithinSegment(section.section, seg.*)) break seg;
- } else null;
-
- if (section.section.sh_name != 0 and header.shstrndx != elf.SHN_UNDEF)
- section.name = std.mem.span(@as([*:0]const u8, @ptrCast(&sections[header.shstrndx].payload.?[section.section.sh_name])));
-
- const category_from_program: SectionCategory = if (section.segment != null) .exe else .debug;
- section.category = switch (section.section.sh_type) {
- elf.SHT_NOTE => .common,
- elf.SHT_SYMTAB => .symbols, // "strip all" vs "strip only debug"
- elf.SHT_DYNSYM => .exe,
- elf.SHT_PROGBITS => cat: {
- if (std.mem.eql(u8, section.name, ".comment")) break :cat .exe;
- if (std.mem.eql(u8, section.name, ".gnu_debuglink")) break :cat .none;
- break :cat category_from_program;
- },
- elf.SHT_LOPROC...elf.SHT_HIPROC => .common, // don't strip unknown sections
- elf.SHT_LOUSER...elf.SHT_HIUSER => .common, // don't strip unknown sections
- else => category_from_program,
- };
- }
-
- sections[0].category = .common; // mandatory null section
- if (header.shstrndx != elf.SHN_UNDEF)
- sections[header.shstrndx].category = .common; // string table for the headers
-
- // recursively propagate section categories to their linked sections, so that they are kept together
- var dirty: u1 = 1;
- while (dirty != 0) {
- dirty = 0;
-
- for (sections) |*section| {
- if (section.section.sh_link != elf.SHN_UNDEF)
- dirty |= ElfFileHelper.propagateCategory(&sections[section.section.sh_link].category, section.category);
- if ((section.section.sh_flags & elf.SHF_INFO_LINK) != 0 and section.section.sh_info != elf.SHN_UNDEF)
- dirty |= ElfFileHelper.propagateCategory(&sections[section.section.sh_info].category, section.category);
- }
- }
-
- return Self{
- .arena = arena,
- .raw_elf_header = raw_header,
- .program_segments = program_segments,
- .sections = sections,
- };
- }
-
- pub fn deinit(self: *Self) void {
- self.arena.deinit();
- }
-
- const Filter = ElfFileHelper.Filter;
- const DebugLink = ElfFileHelper.DebugLink;
- const EmitElfOptions = struct {
- section_filter: Filter = .all,
- debuglink: ?DebugLink = null,
- compress_debug: bool = false,
- add_section: ?AddSection = null,
- set_section_alignment: ?SetSectionAlignment = null,
- set_section_flags: ?SetSectionFlags = null,
- };
- fn emit(self: *const Self, gpa: Allocator, out_file: File, in_file: File, options: EmitElfOptions) !void {
- var arena = std.heap.ArenaAllocator.init(gpa);
- defer arena.deinit();
- const allocator = arena.allocator();
-
- // when emitting the stripped exe:
- // - unused sections are removed
- // when emitting the debug file:
- // - all sections are kept, but some are emptied and their types is changed to SHT_NOBITS
- // the program header is kept unchanged. (`strip` does update it, but `eu-strip` does not, and it still works)
-
- const Update = struct {
- action: ElfFileHelper.Action,
-
- // remap the indexs after omitting the filtered sections
- remap_idx: u16,
-
- // optionally overrides the payload from the source file
- payload: ?[]align(section_memory_align.toByteUnits()) const u8 = null,
- section: ?Elf_Shdr = null,
- };
- const sections_update = try allocator.alloc(Update, self.sections.len);
- const new_shnum = blk: {
- var next_idx: u16 = 0;
- for (self.sections, sections_update) |section, *update| {
- const action = ElfFileHelper.selectAction(section.category, options.section_filter);
- const remap_idx = idx: {
- if (action == .strip) break :idx elf.SHN_UNDEF;
- next_idx += 1;
- break :idx next_idx - 1;
- };
- update.* = Update{ .action = action, .remap_idx = remap_idx };
- }
-
- if (options.debuglink != null)
- next_idx += 1;
-
- if (options.add_section != null) {
- next_idx += 1;
- }
-
- break :blk next_idx;
- };
-
- // add a ".gnu_debuglink" to the string table if needed
- const debuglink_name: u32 = blk: {
- if (options.debuglink == null) break :blk elf.SHN_UNDEF;
- if (self.raw_elf_header.e_shstrndx == elf.SHN_UNDEF)
- fatal("zig objcopy: no strtab, cannot add the debuglink section", .{}); // TODO add the section if needed?
-
- const strtab = &self.sections[self.raw_elf_header.e_shstrndx];
- const update = &sections_update[self.raw_elf_header.e_shstrndx];
-
- const name: []const u8 = ".gnu_debuglink";
- const new_offset: u32 = @intCast(strtab.payload.?.len);
- const buf = try allocator.alignedAlloc(u8, section_memory_align, new_offset + name.len + 1);
- @memcpy(buf[0..new_offset], strtab.payload.?);
- @memcpy(buf[new_offset..][0..name.len], name);
- buf[new_offset + name.len] = 0;
-
- assert(update.action == .keep);
- update.payload = buf;
-
- break :blk new_offset;
- };
-
- // add user section to the string table if needed
- const user_section_name: u32 = blk: {
- if (options.add_section == null) break :blk elf.SHN_UNDEF;
- if (self.raw_elf_header.e_shstrndx == elf.SHN_UNDEF)
- fatal("zig objcopy: no strtab, cannot add the user section", .{}); // TODO add the section if needed?
-
- const strtab = &self.sections[self.raw_elf_header.e_shstrndx];
- const update = &sections_update[self.raw_elf_header.e_shstrndx];
-
- const name = options.add_section.?.section_name;
- const new_offset: u32 = @intCast(strtab.payload.?.len);
- const buf = try allocator.alignedAlloc(u8, section_memory_align, new_offset + name.len + 1);
- @memcpy(buf[0..new_offset], strtab.payload.?);
- @memcpy(buf[new_offset..][0..name.len], name);
- buf[new_offset + name.len] = 0;
-
- assert(update.action == .keep);
- update.payload = buf;
-
- break :blk new_offset;
- };
-
- // maybe compress .debug sections
- if (options.compress_debug) {
- for (self.sections[1..], sections_update[1..]) |section, *update| {
- if (update.action != .keep) continue;
- if (!std.mem.startsWith(u8, section.name, ".debug_")) continue;
- if ((section.section.sh_flags & elf.SHF_COMPRESSED) != 0) continue; // already compressed
-
- const chdr = Elf_Chdr{
- .ch_type = elf.COMPRESS.ZLIB,
- .ch_size = section.section.sh_size,
- .ch_addralign = section.section.sh_addralign,
- };
-
- const compressed_payload = try ElfFileHelper.tryCompressSection(allocator, in_file, section.section.sh_offset, section.section.sh_size, std.mem.asBytes(&chdr));
- if (compressed_payload) |payload| {
- update.payload = payload;
- update.section = section.section;
- update.section.?.sh_addralign = @alignOf(Elf_Chdr);
- update.section.?.sh_size = @intCast(payload.len);
- update.section.?.sh_flags |= elf.SHF_COMPRESSED;
- }
- }
- }
-
- var cmdbuf = std.ArrayList(ElfFileHelper.WriteCmd).init(allocator);
- defer cmdbuf.deinit();
- try cmdbuf.ensureUnusedCapacity(3 + new_shnum);
- var eof_offset: Elf_OffSize = 0; // track the end of the data written so far.
-
- // build the updated headers
- // nb: updated_elf_header will be updated before the actual write
- var updated_elf_header = self.raw_elf_header;
- if (updated_elf_header.e_shstrndx != elf.SHN_UNDEF)
- updated_elf_header.e_shstrndx = sections_update[updated_elf_header.e_shstrndx].remap_idx;
- cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = std.mem.asBytes(&updated_elf_header), .out_offset = 0 } });
- eof_offset = @sizeOf(Elf_Ehdr);
-
- // program header as-is.
- // nb: for only-debug files, removing it appears to work, but is invalid by ELF specifcation.
- {
- assert(updated_elf_header.e_phoff == @sizeOf(Elf_Ehdr));
- const data = std.mem.sliceAsBytes(self.program_segments);
- assert(data.len == @as(usize, updated_elf_header.e_phentsize) * updated_elf_header.e_phnum);
- cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = data, .out_offset = updated_elf_header.e_phoff } });
- eof_offset = updated_elf_header.e_phoff + @as(Elf_OffSize, @intCast(data.len));
- }
-
- // update sections and queue payload writes
- const updated_section_header = blk: {
- const dest_sections = try allocator.alloc(Elf_Shdr, new_shnum);
-
- {
- // the ELF format doesn't specify the order for all sections.
- // this code only supports when they are in increasing file order.
- var offset: u64 = eof_offset;
- for (self.sections[1..]) |section| {
- if (section.section.sh_type == elf.SHT_NOBITS)
- continue;
- if (section.section.sh_offset < offset) {
- fatal("zig objcopy: unsupported ELF file", .{});
- }
- offset = section.section.sh_offset;
- }
- }
-
- dest_sections[0] = self.sections[0].section;
-
- var dest_section_idx: u32 = 1;
- for (self.sections[1..], sections_update[1..]) |section, update| {
- if (update.action == .strip) continue;
- assert(update.remap_idx == dest_section_idx);
-
- const src = if (update.section) |*s| s else &section.section;
- const dest = &dest_sections[dest_section_idx];
- const payload = if (update.payload) |data| data else section.payload;
- dest_section_idx += 1;
-
- dest.* = src.*;
-
- if (src.sh_link != elf.SHN_UNDEF)
- dest.sh_link = sections_update[src.sh_link].remap_idx;
- if ((src.sh_flags & elf.SHF_INFO_LINK) != 0 and src.sh_info != elf.SHN_UNDEF)
- dest.sh_info = sections_update[src.sh_info].remap_idx;
-
- if (payload) |data|
- dest.sh_size = @intCast(data.len);
-
- const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign;
- dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign);
- if (src.sh_offset != dest.sh_offset and section.segment != null and update.action != .empty and dest.sh_type != elf.SHT_NOTE and dest.sh_type != elf.SHT_NOBITS) {
- if (src.sh_offset > dest.sh_offset) {
- dest.sh_offset = src.sh_offset; // add padding to avoid modifing the program segments
- } else {
- fatal("zig objcopy: cannot adjust program segments", .{});
- }
- }
- assert(dest.sh_addr % addralign == dest.sh_offset % addralign);
-
- if (update.action == .empty)
- dest.sh_type = elf.SHT_NOBITS;
-
- if (dest.sh_type != elf.SHT_NOBITS) {
- if (payload) |src_data| {
- // update sections payload and write
- const dest_data = switch (src.sh_type) {
- elf.DT_VERSYM => dst_data: {
- const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len);
- @memcpy(data, src_data);
-
- const defs = @as([*]elf.Verdef, @ptrCast(data))[0 .. @as(usize, @intCast(src.sh_size)) / @sizeOf(elf.Verdef)];
- for (defs) |*def| switch (def.ndx) {
- .LOCAL, .GLOBAL => {},
- else => def.ndx = @enumFromInt(sections_update[src.sh_info].remap_idx),
- };
-
- break :dst_data data;
- },
- elf.SHT_SYMTAB, elf.SHT_DYNSYM => dst_data: {
- const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len);
- @memcpy(data, src_data);
-
- const syms = @as([*]Elf_Sym, @ptrCast(data))[0 .. @as(usize, @intCast(src.sh_size)) / @sizeOf(Elf_Sym)];
- for (syms) |*sym| {
- if (sym.st_shndx != elf.SHN_UNDEF and sym.st_shndx < elf.SHN_LORESERVE)
- sym.st_shndx = sections_update[sym.st_shndx].remap_idx;
- }
-
- break :dst_data data;
- },
- else => src_data,
- };
-
- assert(dest_data.len == dest.sh_size);
- cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = dest_data, .out_offset = dest.sh_offset } });
- eof_offset = dest.sh_offset + dest.sh_size;
- } else {
- // direct contents copy
- cmdbuf.appendAssumeCapacity(.{ .copy_range = .{ .in_offset = src.sh_offset, .len = dest.sh_size, .out_offset = dest.sh_offset } });
- eof_offset = dest.sh_offset + dest.sh_size;
- }
- } else {
- // account for alignment padding even in empty sections to keep logical section order
- eof_offset = dest.sh_offset;
- }
- }
-
- // add a ".gnu_debuglink" section
- if (options.debuglink) |link| {
- const payload = payload: {
- const crc_offset = std.mem.alignForward(usize, link.name.len + 1, 4);
- const buf = try allocator.alignedAlloc(u8, .@"4", crc_offset + 4);
- @memcpy(buf[0..link.name.len], link.name);
- @memset(buf[link.name.len..crc_offset], 0);
- @memcpy(buf[crc_offset..], std.mem.asBytes(&link.crc32));
- break :payload buf;
- };
-
- dest_sections[dest_section_idx] = Elf_Shdr{
- .sh_name = debuglink_name,
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = eof_offset,
- .sh_size = @intCast(payload.len),
- .sh_link = elf.SHN_UNDEF,
- .sh_info = elf.SHN_UNDEF,
- .sh_addralign = 4,
- .sh_entsize = 0,
- };
- dest_section_idx += 1;
-
- cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = payload, .out_offset = eof_offset } });
- eof_offset += @as(Elf_OffSize, @intCast(payload.len));
- }
-
- // --add-section
- if (options.add_section) |add_section| {
- var section_file = fs.cwd().openFile(add_section.file_path, .{}) catch |err|
- fatal("unable to open '{s}': {s}", .{ add_section.file_path, @errorName(err) });
- defer section_file.close();
-
- const payload = try section_file.readToEndAlloc(arena.allocator(), std.math.maxInt(usize));
-
- dest_sections[dest_section_idx] = Elf_Shdr{
- .sh_name = user_section_name,
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = eof_offset,
- .sh_size = @intCast(payload.len),
- .sh_link = elf.SHN_UNDEF,
- .sh_info = elf.SHN_UNDEF,
- .sh_addralign = 4,
- .sh_entsize = 0,
- };
- dest_section_idx += 1;
-
- cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = payload, .out_offset = eof_offset } });
- eof_offset += @as(Elf_OffSize, @intCast(payload.len));
- }
-
- assert(dest_section_idx == new_shnum);
- break :blk dest_sections;
- };
-
- // --set-section-alignment: overwrite alignment
- if (options.set_section_alignment) |set_align| {
- if (self.raw_elf_header.e_shstrndx == elf.SHN_UNDEF)
- fatal("zig objcopy: no strtab, cannot add the user section", .{}); // TODO add the section if needed?
-
- const strtab = &sections_update[self.raw_elf_header.e_shstrndx];
- for (updated_section_header) |*section| {
- const section_name = std.mem.span(@as([*:0]const u8, @ptrCast(&strtab.payload.?[section.sh_name])));
- if (std.mem.eql(u8, section_name, set_align.section_name)) {
- section.sh_addralign = set_align.alignment;
- break;
- }
- } else std.log.warn("Skipping --set-section-alignment. Section '{s}' not found", .{set_align.section_name});
- }
-
- // --set-section-flags: overwrite flags
- if (options.set_section_flags) |set_flags| {
- if (self.raw_elf_header.e_shstrndx == elf.SHN_UNDEF)
- fatal("zig objcopy: no strtab, cannot add the user section", .{}); // TODO add the section if needed?
-
- const strtab = &sections_update[self.raw_elf_header.e_shstrndx];
- for (updated_section_header) |*section| {
- const section_name = std.mem.span(@as([*:0]const u8, @ptrCast(&strtab.payload.?[section.sh_name])));
- if (std.mem.eql(u8, section_name, set_flags.section_name)) {
- section.sh_flags = std.elf.SHF_WRITE; // default is writable cleared by "readonly"
- const f = set_flags.flags;
-
- // Supporting a subset of GNU and LLVM objcopy for ELF only
- // GNU:
- // alloc: add SHF_ALLOC
- // contents: if section is SHT_NOBITS, set SHT_PROGBITS, otherwise do nothing
- // load: if section is SHT_NOBITS, set SHT_PROGBITS, otherwise do nothing (same as contents)
- // noload: not ELF relevant
- // readonly: clear default SHF_WRITE flag
- // code: add SHF_EXECINSTR
- // data: not ELF relevant
- // rom: ignored
- // exclude: add SHF_EXCLUDE
- // share: not ELF relevant
- // debug: not ELF relevant
- // large: add SHF_X86_64_LARGE. Fatal error if target is not x86_64
- if (f.alloc) section.sh_flags |= std.elf.SHF_ALLOC;
- if (f.contents or f.load) {
- if (section.sh_type == std.elf.SHT_NOBITS) section.sh_type = std.elf.SHT_PROGBITS;
- }
- if (f.readonly) section.sh_flags &= ~@as(@TypeOf(section.sh_type), std.elf.SHF_WRITE);
- if (f.code) section.sh_flags |= std.elf.SHF_EXECINSTR;
- if (f.exclude) section.sh_flags |= std.elf.SHF_EXCLUDE;
- if (f.large) {
- if (updated_elf_header.e_machine != std.elf.EM.X86_64)
- fatal("zig objcopy: 'large' section flag is only supported on x86_64 targets", .{});
- section.sh_flags |= std.elf.SHF_X86_64_LARGE;
- }
-
- // LLVM:
- // merge: add SHF_MERGE
- // strings: add SHF_STRINGS
- if (f.merge) section.sh_flags |= std.elf.SHF_MERGE;
- if (f.strings) section.sh_flags |= std.elf.SHF_STRINGS;
- break;
- }
- } else std.log.warn("Skipping --set-section-flags. Section '{s}' not found", .{set_flags.section_name});
- }
-
- // write the section header at the tail
- {
- const offset = std.mem.alignForward(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr));
-
- const data = std.mem.sliceAsBytes(updated_section_header);
- assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum);
- updated_elf_header.e_shoff = offset;
- updated_elf_header.e_shnum = new_shnum;
-
- cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = data, .out_offset = updated_elf_header.e_shoff } });
- }
-
- try ElfFileHelper.write(allocator, out_file, in_file, cmdbuf.items);
- }
-
- fn sectionWithinSegment(section: Elf_Shdr, segment: Elf_Phdr) bool {
- const file_size = if (section.sh_type == elf.SHT_NOBITS) 0 else section.sh_size;
- return segment.p_offset <= section.sh_offset and (segment.p_offset + segment.p_filesz) >= (section.sh_offset + file_size);
- }
- };
-}
-
-const ElfFileHelper = struct {
- const DebugLink = struct { name: []const u8, crc32: u32 };
- const Filter = enum { all, program, debug, program_and_symbols, debug_and_symbols };
-
- const SectionCategory = enum { common, exe, debug, symbols, none };
- fn propagateCategory(cur: *SectionCategory, new: SectionCategory) u1 {
- const cat: SectionCategory = switch (cur.*) {
- .none => new,
- .common => .common,
- .debug => switch (new) {
- .none, .debug => .debug,
- else => new,
- },
- .exe => switch (new) {
- .common => .common,
- .none, .debug, .exe => .exe,
- .symbols => .exe,
- },
- .symbols => switch (new) {
- .none, .common, .debug, .exe => unreachable,
- .symbols => .symbols,
- },
- };
-
- if (cur.* != cat) {
- cur.* = cat;
- return 1;
- } else {
- return 0;
- }
- }
-
- const Action = enum { keep, strip, empty };
- fn selectAction(category: SectionCategory, filter: Filter) Action {
- if (category == .none) return .strip;
- return switch (filter) {
- .all => switch (category) {
- .none => .strip,
- else => .keep,
- },
- .program => switch (category) {
- .common, .exe => .keep,
- else => .strip,
- },
- .program_and_symbols => switch (category) {
- .common, .exe, .symbols => .keep,
- else => .strip,
- },
- .debug => switch (category) {
- .exe, .symbols => .empty,
- .none => .strip,
- else => .keep,
- },
- .debug_and_symbols => switch (category) {
- .exe => .empty,
- .none => .strip,
- else => .keep,
- },
- };
- }
-
- const WriteCmd = union(enum) {
- copy_range: struct { in_offset: u64, len: u64, out_offset: u64 },
- write_data: struct { data: []const u8, out_offset: u64 },
- };
- fn write(allocator: Allocator, out_file: File, in_file: File, cmds: []const WriteCmd) !void {
- // consolidate holes between writes:
- // by coping original padding data from in_file (by fusing contiguous ranges)
- // by writing zeroes otherwise
- const zeroes = [1]u8{0} ** 4096;
- var consolidated = std.ArrayList(WriteCmd).init(allocator);
- defer consolidated.deinit();
- try consolidated.ensureUnusedCapacity(cmds.len * 2);
- var offset: u64 = 0;
- var fused_cmd: ?WriteCmd = null;
- for (cmds) |cmd| {
- switch (cmd) {
- .write_data => |data| {
- assert(data.out_offset >= offset);
- if (fused_cmd) |prev| {
- consolidated.appendAssumeCapacity(prev);
- fused_cmd = null;
- }
- if (data.out_offset > offset) {
- consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(data.out_offset - offset)], .out_offset = offset } });
- }
- consolidated.appendAssumeCapacity(cmd);
- offset = data.out_offset + data.data.len;
- },
- .copy_range => |range| {
- assert(range.out_offset >= offset);
- if (fused_cmd) |prev| {
- if (range.in_offset >= prev.copy_range.in_offset + prev.copy_range.len and (range.out_offset - prev.copy_range.out_offset == range.in_offset - prev.copy_range.in_offset)) {
- fused_cmd = .{ .copy_range = .{
- .in_offset = prev.copy_range.in_offset,
- .out_offset = prev.copy_range.out_offset,
- .len = (range.out_offset + range.len) - prev.copy_range.out_offset,
- } };
- } else {
- consolidated.appendAssumeCapacity(prev);
- if (range.out_offset > offset) {
- consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(range.out_offset - offset)], .out_offset = offset } });
- }
- fused_cmd = cmd;
- }
- } else {
- fused_cmd = cmd;
- }
- offset = range.out_offset + range.len;
- },
- }
- }
- if (fused_cmd) |cmd| {
- consolidated.appendAssumeCapacity(cmd);
- }
-
- // write the output file
- for (consolidated.items) |cmd| {
- switch (cmd) {
- .write_data => |data| {
- var iovec = [_]std.posix.iovec_const{.{ .base = data.data.ptr, .len = data.data.len }};
- try out_file.pwritevAll(&iovec, data.out_offset);
- },
- .copy_range => |range| {
- const copied_bytes = try in_file.copyRangeAll(range.in_offset, out_file, range.out_offset, range.len);
- if (copied_bytes < range.len) return error.TRUNCATED_ELF;
- },
- }
- }
- }
-
- fn tryCompressSection(allocator: Allocator, in_file: File, offset: u64, size: u64, prefix: []const u8) !?[]align(8) const u8 {
- if (size < prefix.len) return null;
-
- try in_file.seekTo(offset);
- var section_reader = std.io.limitedReader(in_file.deprecatedReader(), size);
-
- // allocate as large as decompressed data. if the compression doesn't fit, keep the data uncompressed.
- const compressed_data = try allocator.alignedAlloc(u8, .@"8", @intCast(size));
- var compressed_stream = std.io.fixedBufferStream(compressed_data);
-
- try compressed_stream.writer().writeAll(prefix);
-
- {
- var compressor = try std.compress.zlib.compressor(compressed_stream.writer(), .{});
-
- var buf: [8000]u8 = undefined;
- while (true) {
- const bytes_read = try section_reader.read(&buf);
- if (bytes_read == 0) break;
- const bytes_written = compressor.write(buf[0..bytes_read]) catch |err| switch (err) {
- error.NoSpaceLeft => {
- allocator.free(compressed_data);
- return null;
- },
- else => return err,
- };
- std.debug.assert(bytes_written == bytes_read);
- }
- compressor.finish() catch |err| switch (err) {
- error.NoSpaceLeft => {
- allocator.free(compressed_data);
- return null;
- },
- else => return err,
- };
- }
-
- const compressed_len: usize = @intCast(compressed_stream.getPos() catch unreachable);
- const data = allocator.realloc(compressed_data, compressed_len) catch compressed_data;
- return data[0..compressed_len];
- }
-
- fn createDebugLink(path: []const u8) DebugLink {
- const file = std.fs.cwd().openFile(path, .{}) catch |err| {
- fatal("zig objcopy: could not open `{s}`: {s}\n", .{ path, @errorName(err) });
- };
- defer file.close();
-
- const crc = ElfFileHelper.computeFileCrc(file) catch |err| {
- fatal("zig objcopy: could not read `{s}`: {s}\n", .{ path, @errorName(err) });
- };
- return .{
- .name = std.fs.path.basename(path),
- .crc32 = crc,
- };
- }
-
- fn computeFileCrc(file: File) !u32 {
- var buf: [8000]u8 = undefined;
-
- try file.seekTo(0);
- var hasher = std.hash.Crc32.init();
- while (true) {
- const bytes_read = try file.read(&buf);
- if (bytes_read == 0) break;
- hasher.update(buf[0..bytes_read]);
- }
- return hasher.final();
- }
-};
-
const SectionFlags = packed struct {
alloc: bool = false,
contents: bool = false,
diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig
index b5bc742717..74a9c65849 100644
--- a/lib/compiler/std-docs.zig
+++ b/lib/compiler/std-docs.zig
@@ -60,7 +60,9 @@ pub fn main() !void {
const should_open_browser = force_open_browser orelse (listen_port == 0);
const address = std.net.Address.parseIp("127.0.0.1", listen_port) catch unreachable;
- var http_server = try address.listen(.{});
+ var http_server = try address.listen(.{
+ .reuse_address = true,
+ });
const port = http_server.listen_address.in.getPort();
const url_with_newline = try std.fmt.allocPrint(arena, "http://127.0.0.1:{d}/\n", .{port});
std.fs.File.stdout().writeAll(url_with_newline) catch {};
@@ -189,7 +191,11 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
var walker = try std_dir.walk(gpa);
defer walker.deinit();
- var archiver = std.tar.writer(response.writer());
+ var adapter_buffer: [500]u8 = undefined;
+ var response_writer = response.writer().adaptToNewApi();
+ response_writer.new_interface.buffer = &adapter_buffer;
+
+ var archiver: std.tar.Writer = .{ .underlying_writer = &response_writer.new_interface };
archiver.prefix = "std";
while (try walker.next()) |entry| {
@@ -204,7 +210,13 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
}
var file = try entry.dir.openFile(entry.basename, .{});
defer file.close();
- try archiver.writeFile(entry.path, file);
+ const stat = try file.stat();
+ var file_reader: std.fs.File.Reader = .{
+ .file = file,
+ .interface = std.fs.File.Reader.initInterface(&.{}),
+ .size = stat.size,
+ };
+ try archiver.writeFile(entry.path, &file_reader, stat.mtime);
}
{
@@ -217,6 +229,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
// intentionally omitting the pointless trailer
//try archiver.finish();
+ try response_writer.new_interface.flush();
try response.end();
}
@@ -307,21 +320,17 @@ fn buildWasmBinary(
try sendMessage(child.stdin.?, .update);
try sendMessage(child.stdin.?, .exit);
- const Header = std.zig.Server.Message.Header;
var result: ?Cache.Path = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
- const stdout = poller.fifo(.stdout);
+ const stdout = poller.reader(.stdout);
poll: while (true) {
- while (stdout.readableLength() < @sizeOf(Header)) {
- if (!(try poller.poll())) break :poll;
- }
- const header = stdout.reader().readStruct(Header) catch unreachable;
- while (stdout.readableLength() < header.bytes_len) {
- if (!(try poller.poll())) break :poll;
- }
- const body = stdout.readableSliceOfLen(header.bytes_len);
+ const Header = std.zig.Server.Message.Header;
+ while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+ const header = stdout.takeStruct(Header, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+ const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
@@ -361,15 +370,11 @@ fn buildWasmBinary(
},
else => {}, // ignore other messages
}
-
- stdout.discard(body.len);
}
- const stderr = poller.fifo(.stderr);
- if (stderr.readableLength() > 0) {
- const owned_stderr = try stderr.toOwnedSlice();
- defer gpa.free(owned_stderr);
- std.debug.print("{s}", .{owned_stderr});
+ const stderr = poller.reader(.stderr);
+ if (stderr.bufferedLen() > 0) {
+ std.debug.print("{s}", .{stderr.buffered()});
}
// Send EOF to stdin.
diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index 8b60a75399..e618f72d2f 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -16,6 +16,7 @@ var stdin_buffer: [4096]u8 = undefined;
var stdout_buffer: [4096]u8 = undefined;
const crippled = switch (builtin.zig_backend) {
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> true,
@@ -287,13 +288,14 @@ pub fn log(
/// work-in-progress backends can handle it.
pub fn mainSimple() anyerror!void {
@disableInstrumentation();
- // is the backend capable of printing to stderr?
- const enable_print = switch (builtin.zig_backend) {
+ // is the backend capable of calling `std.fs.File.writeAll`?
+ const enable_write = switch (builtin.zig_backend) {
+ .stage2_aarch64, .stage2_riscv64 => true,
else => false,
};
- // is the backend capable of using std.fmt.format to print a summary at the end?
- const print_summary = switch (builtin.zig_backend) {
- .stage2_riscv64 => true,
+ // is the backend capable of calling `std.Io.Writer.print`?
+ const enable_print = switch (builtin.zig_backend) {
+ .stage2_aarch64, .stage2_riscv64 => true,
else => false,
};
@@ -302,34 +304,31 @@ pub fn mainSimple() anyerror!void {
var failed: u64 = 0;
// we don't want to bring in File and Writer if the backend doesn't support it
- const stderr = if (comptime enable_print) std.fs.File.stderr() else {};
+ const stdout = if (enable_write) std.fs.File.stdout() else {};
for (builtin.test_functions) |test_fn| {
+ if (enable_write) {
+ stdout.writeAll(test_fn.name) catch {};
+ stdout.writeAll("... ") catch {};
+ }
if (test_fn.func()) |_| {
- if (enable_print) {
- stderr.writeAll(test_fn.name) catch {};
- stderr.writeAll("... ") catch {};
- stderr.writeAll("PASS\n") catch {};
- }
+ if (enable_write) stdout.writeAll("PASS\n") catch {};
} else |err| {
- if (enable_print) {
- stderr.writeAll(test_fn.name) catch {};
- stderr.writeAll("... ") catch {};
- }
if (err != error.SkipZigTest) {
- if (enable_print) stderr.writeAll("FAIL\n") catch {};
+ if (enable_write) stdout.writeAll("FAIL\n") catch {};
failed += 1;
- if (!enable_print) return err;
+ if (!enable_write) return err;
continue;
}
- if (enable_print) stderr.writeAll("SKIP\n") catch {};
+ if (enable_write) stdout.writeAll("SKIP\n") catch {};
skipped += 1;
continue;
}
passed += 1;
}
- if (enable_print and print_summary) {
- stderr.deprecatedWriter().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
+ if (enable_print) {
+ var stdout_writer = stdout.writer(&.{});
+ stdout_writer.interface.print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
}
if (failed != 0) std.process.exit(1);
}
diff --git a/lib/compiler_rt.zig b/lib/compiler_rt.zig
index 46db464fd9..b8723c56ee 100644
--- a/lib/compiler_rt.zig
+++ b/lib/compiler_rt.zig
@@ -249,12 +249,12 @@ comptime {
_ = @import("compiler_rt/hexagon.zig");
if (@import("builtin").object_format != .c) {
- _ = @import("compiler_rt/atomics.zig");
+ if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/atomics.zig");
_ = @import("compiler_rt/stack_probe.zig");
// macOS has these functions inside libSystem.
if (builtin.cpu.arch.isAARCH64() and !builtin.os.tag.isDarwin()) {
- _ = @import("compiler_rt/aarch64_outline_atomics.zig");
+ if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/aarch64_outline_atomics.zig");
}
_ = @import("compiler_rt/memcpy.zig");
diff --git a/lib/compiler_rt/addo.zig b/lib/compiler_rt/addo.zig
index beb6249223..610d620690 100644
--- a/lib/compiler_rt/addo.zig
+++ b/lib/compiler_rt/addo.zig
@@ -1,6 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("./common.zig");
pub const panic = @import("common.zig").panic;
@@ -16,7 +14,7 @@ comptime {
// - addoXi4_generic as default
inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
overflow.* = 0;
const sum: ST = a +% b;
// Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
diff --git a/lib/compiler_rt/addoti4_test.zig b/lib/compiler_rt/addoti4_test.zig
index dc85830df9..d031d1d428 100644
--- a/lib/compiler_rt/addoti4_test.zig
+++ b/lib/compiler_rt/addoti4_test.zig
@@ -1,4 +1,5 @@
const addv = @import("addo.zig");
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@@ -23,6 +24,8 @@ fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "addoti4" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;
diff --git a/lib/compiler_rt/clear_cache.zig b/lib/compiler_rt/clear_cache.zig
index e4a0a9d00d..c43d35602c 100644
--- a/lib/compiler_rt/clear_cache.zig
+++ b/lib/compiler_rt/clear_cache.zig
@@ -97,8 +97,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
.nbytes = end - start,
.whichcache = 3, // ICACHE | DCACHE
};
- asm volatile (
- \\ syscall
+ asm volatile ("syscall"
:
: [_] "{$2}" (165), // nr = SYS_sysarch
[_] "{$4}" (0), // op = MIPS_CACHEFLUSH
@@ -116,11 +115,8 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
} else if (arm64 and !apple) {
// Get Cache Type Info.
// TODO memoize this?
- var ctr_el0: u64 = 0;
- asm volatile (
- \\mrs %[x], ctr_el0
- \\
- : [x] "=r" (ctr_el0),
+ const ctr_el0 = asm volatile ("mrs %[ctr_el0], ctr_el0"
+ : [ctr_el0] "=r" (-> u64),
);
// The DC and IC instructions must use 64-bit registers so we don't use
// uintptr_t in case this runs in an IPL32 environment.
@@ -187,9 +183,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
exportIt();
} else if (os == .linux and loongarch) {
// See: https://github.com/llvm/llvm-project/blob/cf54cae26b65fc3201eff7200ffb9b0c9e8f9a13/compiler-rt/lib/builtins/clear_cache.c#L94-L95
- asm volatile (
- \\ ibar 0
- );
+ asm volatile ("ibar 0");
exportIt();
}
diff --git a/lib/compiler_rt/cmp.zig b/lib/compiler_rt/cmp.zig
index e1273aa622..67cb5b0938 100644
--- a/lib/compiler_rt/cmp.zig
+++ b/lib/compiler_rt/cmp.zig
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig
index f5423019f1..1160b1c718 100644
--- a/lib/compiler_rt/common.zig
+++ b/lib/compiler_rt/common.zig
@@ -102,9 +102,14 @@ pub const gnu_f16_abi = switch (builtin.cpu.arch) {
pub const want_sparc_abi = builtin.cpu.arch.isSPARC();
+pub const test_safety = switch (builtin.zig_backend) {
+ .stage2_aarch64 => false,
+ else => builtin.is_test,
+};
+
// Avoid dragging in the runtime safety mechanisms into this .o file, unless
// we're trying to test compiler-rt.
-pub const panic = if (builtin.is_test) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
+pub const panic = if (test_safety) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
/// This seems to mostly correspond to `clang::TargetInfo::HasFloat16`.
pub fn F16T(comptime OtherType: type) type {
diff --git a/lib/compiler_rt/comparedf2_test.zig b/lib/compiler_rt/comparedf2_test.zig
index 9444c6adf7..dbae6bbeec 100644
--- a/lib/compiler_rt/comparedf2_test.zig
+++ b/lib/compiler_rt/comparedf2_test.zig
@@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;
const __ledf2 = @import("./cmpdf2.zig").__ledf2;
diff --git a/lib/compiler_rt/comparesf2_test.zig b/lib/compiler_rt/comparesf2_test.zig
index 40b1324cfa..65e78da99e 100644
--- a/lib/compiler_rt/comparesf2_test.zig
+++ b/lib/compiler_rt/comparesf2_test.zig
@@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const __eqsf2 = @import("./cmpsf2.zig").__eqsf2;
const __lesf2 = @import("./cmpsf2.zig").__lesf2;
diff --git a/lib/compiler_rt/count0bits.zig b/lib/compiler_rt/count0bits.zig
index c9bdfb7c23..874604eb2c 100644
--- a/lib/compiler_rt/count0bits.zig
+++ b/lib/compiler_rt/count0bits.zig
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
diff --git a/lib/compiler_rt/divdf3.zig b/lib/compiler_rt/divdf3.zig
index 0340404a69..7b47cd3a70 100644
--- a/lib/compiler_rt/divdf3.zig
+++ b/lib/compiler_rt/divdf3.zig
@@ -5,7 +5,6 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
const common = @import("common.zig");
const normalize = common.normalize;
diff --git a/lib/compiler_rt/divmodei4.zig b/lib/compiler_rt/divmodei4.zig
index 3f12e8697d..ab11452206 100644
--- a/lib/compiler_rt/divmodei4.zig
+++ b/lib/compiler_rt/divmodei4.zig
@@ -34,7 +34,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
}
pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -43,7 +43,7 @@ pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) vo
}
pub fn __modei4(r_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
diff --git a/lib/compiler_rt/fixint_test.zig b/lib/compiler_rt/fixint_test.zig
index 57b4093809..198167ab86 100644
--- a/lib/compiler_rt/fixint_test.zig
+++ b/lib/compiler_rt/fixint_test.zig
@@ -1,4 +1,3 @@
-const is_test = @import("builtin").is_test;
const std = @import("std");
const math = std.math;
const testing = std.testing;
diff --git a/lib/compiler_rt/int.zig b/lib/compiler_rt/int.zig
index 4a89d0799d..16c504ee66 100644
--- a/lib/compiler_rt/int.zig
+++ b/lib/compiler_rt/int.zig
@@ -6,7 +6,6 @@ const testing = std.testing;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
const common = @import("common.zig");
const udivmod = @import("udivmod.zig").udivmod;
const __divti3 = @import("divti3.zig").__divti3;
diff --git a/lib/compiler_rt/memcpy.zig b/lib/compiler_rt/memcpy.zig
index 30971677ab..424e92954d 100644
--- a/lib/compiler_rt/memcpy.zig
+++ b/lib/compiler_rt/memcpy.zig
@@ -11,7 +11,7 @@ comptime {
.visibility = common.visibility,
};
- if (builtin.mode == .ReleaseSmall)
+ if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memcpySmall, export_options)
else
@export(&memcpyFast, export_options);
@@ -195,6 +195,8 @@ inline fn copyRange4(
}
test "memcpy" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const S = struct {
fn testFunc(comptime copy_func: anytype) !void {
const max_len = 1024;
diff --git a/lib/compiler_rt/memmove.zig b/lib/compiler_rt/memmove.zig
index 71289a50ae..46c5a631cb 100644
--- a/lib/compiler_rt/memmove.zig
+++ b/lib/compiler_rt/memmove.zig
@@ -14,7 +14,7 @@ comptime {
.visibility = common.visibility,
};
- if (builtin.mode == .ReleaseSmall)
+ if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memmoveSmall, export_options)
else
@export(&memmoveFast, export_options);
@@ -39,7 +39,7 @@ fn memmoveSmall(opt_dest: ?[*]u8, opt_src: ?[*]const u8, len: usize) callconv(.c
}
fn memmoveFast(dest: ?[*]u8, src: ?[*]u8, len: usize) callconv(.c) ?[*]u8 {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const small_limit = @max(2 * @sizeOf(Element), @sizeOf(Element));
if (copySmallLength(small_limit, dest.?, src.?, len)) return dest;
@@ -79,7 +79,7 @@ inline fn copyLessThan16(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
if (len < 4) {
if (len == 0) return;
const b = len / 2;
@@ -100,7 +100,7 @@ inline fn copy16ToSmallLimit(
src: [*]const u8,
len: usize,
) bool {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
inline for (2..(std.math.log2(small_limit) + 1) / 2 + 1) |p| {
const limit = 1 << (2 * p);
if (len < limit) {
@@ -119,7 +119,7 @@ inline fn copyRange4(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
comptime assert(std.math.isPowerOfTwo(copy_len));
assert(len >= copy_len);
assert(len < 4 * copy_len);
@@ -147,7 +147,7 @@ inline fn copyForwards(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
assert(len >= 2 * @sizeOf(Element));
const head = src[0..@sizeOf(Element)].*;
@@ -181,7 +181,7 @@ inline fn copyBlocks(
src: anytype,
max_bytes: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const T = @typeInfo(@TypeOf(dest)).pointer.child;
comptime assert(T == @typeInfo(@TypeOf(src)).pointer.child);
@@ -217,6 +217,8 @@ inline fn copyBackwards(
}
test memmoveFast {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const max_len = 1024;
var buffer: [max_len + @alignOf(Element) - 1]u8 = undefined;
for (&buffer, 0..) |*b, i| {
diff --git a/lib/compiler_rt/mulf3.zig b/lib/compiler_rt/mulf3.zig
index ad60ec41a5..34d39fb9b7 100644
--- a/lib/compiler_rt/mulf3.zig
+++ b/lib/compiler_rt/mulf3.zig
@@ -6,7 +6,7 @@ const common = @import("./common.zig");
/// Ported from:
/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
pub inline fn mulf3(comptime T: type, a: T, b: T) T {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(T).float.bits;
const significandBits = math.floatMantissaBits(T);
const fractionalBits = math.floatFractionalBits(T);
@@ -163,7 +163,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
///
/// This is analogous to an shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(Z).int.bits;
var inexact = false;
if (count < typeWidth) {
diff --git a/lib/compiler_rt/rem_pio2_large.zig b/lib/compiler_rt/rem_pio2_large.zig
index b107a0fabb..f15e0d71f6 100644
--- a/lib/compiler_rt/rem_pio2_large.zig
+++ b/lib/compiler_rt/rem_pio2_large.zig
@@ -251,7 +251,7 @@ const PIo2 = [_]f64{
/// compiler will convert from decimal to binary accurately enough
/// to produce the hexadecimal values shown.
///
-pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
+pub fn rem_pio2_large(x: []const f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
var jz: i32 = undefined;
var jx: i32 = undefined;
var jv: i32 = undefined;
diff --git a/lib/compiler_rt/stack_probe.zig b/lib/compiler_rt/stack_probe.zig
index 94212b7a23..21259ec435 100644
--- a/lib/compiler_rt/stack_probe.zig
+++ b/lib/compiler_rt/stack_probe.zig
@@ -4,7 +4,6 @@ const common = @import("common.zig");
const os_tag = builtin.os.tag;
const arch = builtin.cpu.arch;
const abi = builtin.abi;
-const is_test = builtin.is_test;
pub const panic = common.panic;
diff --git a/lib/compiler_rt/suboti4_test.zig b/lib/compiler_rt/suboti4_test.zig
index 68ad0ff72f..65018bc966 100644
--- a/lib/compiler_rt/suboti4_test.zig
+++ b/lib/compiler_rt/suboti4_test.zig
@@ -1,4 +1,5 @@
const subo = @import("subo.zig");
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@@ -27,6 +28,8 @@ pub fn simple_suboti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "suboti3" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;
diff --git a/lib/compiler_rt/udivmod.zig b/lib/compiler_rt/udivmod.zig
index a9705f317d..bf6aaadeae 100644
--- a/lib/compiler_rt/udivmod.zig
+++ b/lib/compiler_rt/udivmod.zig
@@ -1,8 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const Log2Int = std.math.Log2Int;
-const HalveInt = @import("common.zig").HalveInt;
+const common = @import("common.zig");
+const HalveInt = common.HalveInt;
const lo = switch (builtin.cpu.arch.endian()) {
.big => 1,
@@ -14,7 +14,7 @@ const hi = 1 - lo;
// Returns U / v_ and sets r = U % v_.
fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
const HalfT = HalveInt(T, false).HalfT;
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
var v = v_;
const b = @as(T, 1) << (@bitSizeOf(T) / 2);
@@ -70,7 +70,7 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
}
fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
if (T == u64 and builtin.target.cpu.arch == .x86_64 and builtin.target.os.tag != .windows) {
var rem: T = undefined;
const quo = asm (
@@ -90,7 +90,7 @@ fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
// Returns a_ / b_ and sets maybe_rem = a_ % b.
pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
const HalfT = HalveInt(T, false).HalfT;
const SignedT = std.meta.Int(.signed, @bitSizeOf(T));
diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig
index 6d6f6c1b65..0923f3f222 100644
--- a/lib/compiler_rt/udivmodei4.zig
+++ b/lib/compiler_rt/udivmodei4.zig
@@ -113,7 +113,7 @@ pub fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
}
pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -122,7 +122,7 @@ pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -131,6 +131,7 @@ pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
test "__udivei4/__umodei4" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
diff --git a/lib/docs/wasm/main.zig b/lib/docs/wasm/main.zig
index 7e9ffa5e4c..d3043cd917 100644
--- a/lib/docs/wasm/main.zig
+++ b/lib/docs/wasm/main.zig
@@ -772,10 +772,10 @@ export fn decl_type_html(decl_index: Decl.Index) String {
const Oom = error{OutOfMemory};
fn unpackInner(tar_bytes: []u8) !void {
- var fbs = std.io.fixedBufferStream(tar_bytes);
+ var reader: std.Io.Reader = .fixed(tar_bytes);
var file_name_buffer: [1024]u8 = undefined;
var link_name_buffer: [1024]u8 = undefined;
- var it = std.tar.iterator(fbs.reader(), .{
+ var it: std.tar.Iterator = .init(&reader, .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
});
@@ -796,7 +796,7 @@ fn unpackInner(tar_bytes: []u8) !void {
{
gop.value_ptr.* = file;
}
- const file_bytes = tar_bytes[fbs.pos..][0..@intCast(tar_file.size)];
+ const file_bytes = tar_bytes[reader.seek..][0..@intCast(tar_file.size)];
assert(file == try Walk.add_file(file_name, file_bytes));
}
} else {
diff --git a/lib/init/build.zig b/lib/init/build.zig
index 8a1c03819b..481b586a44 100644
--- a/lib/init/build.zig
+++ b/lib/init/build.zig
@@ -1,4 +1,3 @@
-//! Use `zig init --strip` next time to generate a project without comments.
const std = @import("std");
// Although this function looks imperative, it does not perform the build
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index e65a71e12b..d6b0e68f5d 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -408,104 +408,179 @@ fn createChildOnly(
return child;
}
-fn userInputOptionsFromArgs(allocator: Allocator, args: anytype) UserInputOptionsMap {
- var user_input_options = UserInputOptionsMap.init(allocator);
+fn userInputOptionsFromArgs(arena: Allocator, args: anytype) UserInputOptionsMap {
+ var map = UserInputOptionsMap.init(arena);
inline for (@typeInfo(@TypeOf(args)).@"struct".fields) |field| {
- const v = @field(args, field.name);
- const T = @TypeOf(v);
- switch (T) {
- Target.Query => {
- user_input_options.put(field.name, .{
- .name = field.name,
- .value = .{ .scalar = v.zigTriple(allocator) catch @panic("OOM") },
- .used = false,
- }) catch @panic("OOM");
- user_input_options.put("cpu", .{
- .name = "cpu",
- .value = .{ .scalar = v.serializeCpuAlloc(allocator) catch @panic("OOM") },
- .used = false,
- }) catch @panic("OOM");
- },
- ResolvedTarget => {
- user_input_options.put(field.name, .{
- .name = field.name,
- .value = .{ .scalar = v.query.zigTriple(allocator) catch @panic("OOM") },
- .used = false,
- }) catch @panic("OOM");
- user_input_options.put("cpu", .{
- .name = "cpu",
- .value = .{ .scalar = v.query.serializeCpuAlloc(allocator) catch @panic("OOM") },
- .used = false,
- }) catch @panic("OOM");
- },
- LazyPath => {
- user_input_options.put(field.name, .{
+ if (field.type == @Type(.null)) continue;
+ addUserInputOptionFromArg(arena, &map, field, field.type, @field(args, field.name));
+ }
+ return map;
+}
+
+fn addUserInputOptionFromArg(
+ arena: Allocator,
+ map: *UserInputOptionsMap,
+ field: std.builtin.Type.StructField,
+ comptime T: type,
+ /// If null, the value won't be added, but `T` will still be type-checked.
+ maybe_value: ?T,
+) void {
+ switch (T) {
+ Target.Query => return if (maybe_value) |v| {
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = v.zigTriple(arena) catch @panic("OOM") },
+ .used = false,
+ }) catch @panic("OOM");
+ map.put("cpu", .{
+ .name = "cpu",
+ .value = .{ .scalar = v.serializeCpuAlloc(arena) catch @panic("OOM") },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ ResolvedTarget => return if (maybe_value) |v| {
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = v.query.zigTriple(arena) catch @panic("OOM") },
+ .used = false,
+ }) catch @panic("OOM");
+ map.put("cpu", .{
+ .name = "cpu",
+ .value = .{ .scalar = v.query.serializeCpuAlloc(arena) catch @panic("OOM") },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ std.zig.BuildId => return if (maybe_value) |v| {
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = std.fmt.allocPrint(arena, "{f}", .{v}) catch @panic("OOM") },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ LazyPath => return if (maybe_value) |v| {
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .lazy_path = v.dupeInner(arena) },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ []const LazyPath => return if (maybe_value) |v| {
+ var list = ArrayList(LazyPath).initCapacity(arena, v.len) catch @panic("OOM");
+ for (v) |lp| list.appendAssumeCapacity(lp.dupeInner(arena));
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .lazy_path_list = list },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ []const u8 => return if (maybe_value) |v| {
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = arena.dupe(u8, v) catch @panic("OOM") },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ []const []const u8 => return if (maybe_value) |v| {
+ var list = ArrayList([]const u8).initCapacity(arena, v.len) catch @panic("OOM");
+ for (v) |s| list.appendAssumeCapacity(arena.dupe(u8, s) catch @panic("OOM"));
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .list = list },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ else => switch (@typeInfo(T)) {
+ .bool => return if (maybe_value) |v| {
+ map.put(field.name, .{
.name = field.name,
- .value = .{ .lazy_path = v.dupeInner(allocator) },
+ .value = .{ .scalar = if (v) "true" else "false" },
.used = false,
}) catch @panic("OOM");
},
- []const LazyPath => {
- var list = ArrayList(LazyPath).initCapacity(allocator, v.len) catch @panic("OOM");
- for (v) |lp| list.appendAssumeCapacity(lp.dupeInner(allocator));
- user_input_options.put(field.name, .{
+ .@"enum", .enum_literal => return if (maybe_value) |v| {
+ map.put(field.name, .{
.name = field.name,
- .value = .{ .lazy_path_list = list },
+ .value = .{ .scalar = @tagName(v) },
.used = false,
}) catch @panic("OOM");
},
- []const u8 => {
- user_input_options.put(field.name, .{
+ .comptime_int, .int => return if (maybe_value) |v| {
+ map.put(field.name, .{
.name = field.name,
- .value = .{ .scalar = v },
+ .value = .{ .scalar = std.fmt.allocPrint(arena, "{d}", .{v}) catch @panic("OOM") },
.used = false,
}) catch @panic("OOM");
},
- []const []const u8 => {
- var list = ArrayList([]const u8).initCapacity(allocator, v.len) catch @panic("OOM");
- list.appendSliceAssumeCapacity(v);
-
- user_input_options.put(field.name, .{
+ .comptime_float, .float => return if (maybe_value) |v| {
+ map.put(field.name, .{
.name = field.name,
- .value = .{ .list = list },
+ .value = .{ .scalar = std.fmt.allocPrint(arena, "{x}", .{v}) catch @panic("OOM") },
.used = false,
}) catch @panic("OOM");
},
- else => switch (@typeInfo(T)) {
- .bool => {
- user_input_options.put(field.name, .{
- .name = field.name,
- .value = .{ .scalar = if (v) "true" else "false" },
- .used = false,
- }) catch @panic("OOM");
- },
- .@"enum", .enum_literal => {
- user_input_options.put(field.name, .{
- .name = field.name,
- .value = .{ .scalar = @tagName(v) },
- .used = false,
- }) catch @panic("OOM");
+ .pointer => |ptr_info| switch (ptr_info.size) {
+ .one => switch (@typeInfo(ptr_info.child)) {
+ .array => |array_info| {
+ comptime var slice_info = ptr_info;
+ slice_info.size = .slice;
+ slice_info.is_const = true;
+ slice_info.child = array_info.child;
+ slice_info.sentinel_ptr = null;
+ addUserInputOptionFromArg(
+ arena,
+ map,
+ field,
+ @Type(.{ .pointer = slice_info }),
+ maybe_value orelse null,
+ );
+ return;
+ },
+ else => {},
},
- .comptime_int, .int => {
- user_input_options.put(field.name, .{
- .name = field.name,
- .value = .{ .scalar = std.fmt.allocPrint(allocator, "{d}", .{v}) catch @panic("OOM") },
- .used = false,
- }) catch @panic("OOM");
+ .slice => switch (@typeInfo(ptr_info.child)) {
+ .@"enum" => return if (maybe_value) |v| {
+ var list = ArrayList([]const u8).initCapacity(arena, v.len) catch @panic("OOM");
+ for (v) |tag| list.appendAssumeCapacity(@tagName(tag));
+ map.put(field.name, .{
+ .name = field.name,
+ .value = .{ .list = list },
+ .used = false,
+ }) catch @panic("OOM");
+ },
+ else => {
+ comptime var slice_info = ptr_info;
+ slice_info.is_const = true;
+ slice_info.sentinel_ptr = null;
+ addUserInputOptionFromArg(
+ arena,
+ map,
+ field,
+ @Type(.{ .pointer = slice_info }),
+ maybe_value orelse null,
+ );
+ return;
+ },
},
- .comptime_float, .float => {
- user_input_options.put(field.name, .{
- .name = field.name,
- .value = .{ .scalar = std.fmt.allocPrint(allocator, "{e}", .{v}) catch @panic("OOM") },
- .used = false,
- }) catch @panic("OOM");
+ else => {},
+ },
+ .null => unreachable,
+ .optional => |info| switch (@typeInfo(info.child)) {
+ .optional => {},
+ else => {
+ addUserInputOptionFromArg(
+ arena,
+ map,
+ field,
+ info.child,
+ maybe_value orelse null,
+ );
+ return;
},
- else => @compileError("option '" ++ field.name ++ "' has unsupported type: " ++ @typeName(T)),
},
- }
+ else => {},
+ },
}
-
- return user_input_options;
+ @compileError("option '" ++ field.name ++ "' has unsupported type: " ++ @typeName(field.type));
}
const OrderedUserValue = union(enum) {
diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig
index b28a6e185c..18582a60ef 100644
--- a/lib/std/Build/Fuzz/WebServer.zig
+++ b/lib/std/Build/Fuzz/WebServer.zig
@@ -273,21 +273,17 @@ fn buildWasmBinary(
try sendMessage(child.stdin.?, .update);
try sendMessage(child.stdin.?, .exit);
- const Header = std.zig.Server.Message.Header;
var result: ?Path = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
- const stdout = poller.fifo(.stdout);
+ const stdout = poller.reader(.stdout);
poll: while (true) {
- while (stdout.readableLength() < @sizeOf(Header)) {
- if (!(try poller.poll())) break :poll;
- }
- const header = stdout.reader().readStruct(Header) catch unreachable;
- while (stdout.readableLength() < header.bytes_len) {
- if (!(try poller.poll())) break :poll;
- }
- const body = stdout.readableSliceOfLen(header.bytes_len);
+ const Header = std.zig.Server.Message.Header;
+ while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+ const header = stdout.takeStruct(Header, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+ const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
@@ -325,15 +321,11 @@ fn buildWasmBinary(
},
else => {}, // ignore other messages
}
-
- stdout.discard(body.len);
}
- const stderr = poller.fifo(.stderr);
- if (stderr.readableLength() > 0) {
- const owned_stderr = try stderr.toOwnedSlice();
- defer gpa.free(owned_stderr);
- std.debug.print("{s}", .{owned_stderr});
+ const stderr_contents = try poller.toOwnedSlice(.stderr);
+ if (stderr_contents.len > 0) {
+ std.debug.print("{s}", .{stderr_contents});
}
// Send EOF to stdin.
@@ -522,7 +514,9 @@ fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
var cwd_cache: ?[]const u8 = null;
- var archiver = std.tar.writer(response.writer());
+ var adapter = response.writer().adaptToNewApi();
+ var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface };
+ var read_buffer: [1024]u8 = undefined;
for (deduped_paths) |joined_path| {
var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| {
@@ -530,13 +524,14 @@ fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
continue;
};
defer file.close();
-
+ const stat = try file.stat();
+ var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size);
archiver.prefix = joined_path.root_dir.path orelse try memoizedCwd(arena, &cwd_cache);
- try archiver.writeFile(joined_path.sub_path, file);
+ try archiver.writeFile(joined_path.sub_path, &file_reader, stat.mtime);
}
- // intentionally omitting the pointless trailer
- //try archiver.finish();
+ // intentionally not calling `archiver.finishPedantically`
+ try adapter.new_interface.flush();
try response.end();
}
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 5192249f12..8583427aad 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -286,7 +286,7 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
}
/// For debugging purposes, prints identifying information about this Step.
-pub fn dump(step: *Step, w: *std.io.Writer, tty_config: std.io.tty.Config) void {
+pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void {
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
@errorName(err),
@@ -359,7 +359,7 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO
pub const ZigProcess = struct {
child: std.process.Child,
- poller: std.io.Poller(StreamEnum),
+ poller: std.Io.Poller(StreamEnum),
progress_ipc_fd: if (std.Progress.have_ipc) ?std.posix.fd_t else void,
pub const StreamEnum = enum { stdout, stderr };
@@ -428,7 +428,7 @@ pub fn evalZigProcess(
const zp = try gpa.create(ZigProcess);
zp.* = .{
.child = child,
- .poller = std.io.poll(gpa, ZigProcess.StreamEnum, .{
+ .poller = std.Io.poll(gpa, ZigProcess.StreamEnum, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
}),
@@ -508,20 +508,16 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
try sendMessage(zp.child.stdin.?, .update);
if (!watch) try sendMessage(zp.child.stdin.?, .exit);
- const Header = std.zig.Server.Message.Header;
var result: ?Path = null;
- const stdout = zp.poller.fifo(.stdout);
+ const stdout = zp.poller.reader(.stdout);
poll: while (true) {
- while (stdout.readableLength() < @sizeOf(Header)) {
- if (!(try zp.poller.poll())) break :poll;
- }
- const header = stdout.reader().readStruct(Header) catch unreachable;
- while (stdout.readableLength() < header.bytes_len) {
- if (!(try zp.poller.poll())) break :poll;
- }
- const body = stdout.readableSliceOfLen(header.bytes_len);
+ const Header = std.zig.Server.Message.Header;
+ while (stdout.buffered().len < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll;
+ const header = stdout.takeStruct(Header, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try zp.poller.poll()) break :poll;
+ const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
@@ -547,11 +543,8 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
- if (watch) {
- // This message indicates the end of the update.
- stdout.discard(body.len);
- break;
- }
+ // This message indicates the end of the update.
+ if (watch) break :poll;
},
.emit_digest => {
const EmitDigest = std.zig.Server.Message.EmitDigest;
@@ -611,15 +604,13 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
},
else => {}, // ignore other messages
}
-
- stdout.discard(body.len);
}
s.result_duration_ns = timer.read();
- const stderr = zp.poller.fifo(.stderr);
- if (stderr.readableLength() > 0) {
- try s.result_error_msgs.append(arena, try stderr.toOwnedSlice());
+ const stderr_contents = try zp.poller.toOwnedSlice(.stderr);
+ if (stderr_contents.len > 0) {
+ try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents));
}
return result;
@@ -736,7 +727,7 @@ pub fn allocPrintCmd2(
argv: []const []const u8,
) Allocator.Error![]u8 {
const shell = struct {
- fn escape(writer: anytype, string: []const u8, is_argv0: bool) !void {
+ fn escape(writer: *std.Io.Writer, string: []const u8, is_argv0: bool) !void {
for (string) |c| {
if (switch (c) {
else => true,
@@ -770,9 +761,9 @@ pub fn allocPrintCmd2(
}
};
- var buf: std.ArrayListUnmanaged(u8) = .empty;
- const writer = buf.writer(arena);
- if (opt_cwd) |cwd| try writer.print("cd {s} && ", .{cwd});
+ var aw: std.Io.Writer.Allocating = .init(arena);
+ const writer = &aw.writer;
+ if (opt_cwd) |cwd| writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;
if (opt_env) |env| {
const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
var it = env.iterator();
@@ -782,17 +773,17 @@ pub fn allocPrintCmd2(
if (process_env_map.get(key)) |process_value| {
if (std.mem.eql(u8, value, process_value)) continue;
}
- try writer.print("{s}=", .{key});
- try shell.escape(writer, value, false);
- try writer.writeByte(' ');
+ writer.print("{s}=", .{key}) catch return error.OutOfMemory;
+ shell.escape(writer, value, false) catch return error.OutOfMemory;
+ writer.writeByte(' ') catch return error.OutOfMemory;
}
}
- try shell.escape(writer, argv[0], true);
+ shell.escape(writer, argv[0], true) catch return error.OutOfMemory;
for (argv[1..]) |arg| {
- try writer.writeByte(' ');
- try shell.escape(writer, arg, false);
+ writer.writeByte(' ') catch return error.OutOfMemory;
+ shell.escape(writer, arg, false) catch return error.OutOfMemory;
}
- return buf.toOwnedSlice(arena);
+ return aw.toOwnedSlice();
}
/// Prefer `cacheHitAndWatch` unless you already added watch inputs
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index 356ea4e34e..141d18a7bf 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -681,10 +681,14 @@ pub fn producesImplib(compile: *Compile) bool {
return compile.isDll();
}
+/// Deprecated; use `compile.root_module.link_libc = true` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn linkLibC(compile: *Compile) void {
compile.root_module.link_libc = true;
}
+/// Deprecated; use `compile.root_module.link_libcpp = true` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn linkLibCpp(compile: *Compile) void {
compile.root_module.link_libcpp = true;
}
@@ -802,10 +806,14 @@ fn runPkgConfig(compile: *Compile, lib_name: []const u8) !PkgConfigResult {
};
}
+/// Deprecated; use `compile.root_module.linkSystemLibrary(name, .{})` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn linkSystemLibrary(compile: *Compile, name: []const u8) void {
return compile.root_module.linkSystemLibrary(name, .{});
}
+/// Deprecated; use `compile.root_module.linkSystemLibrary(name, options)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn linkSystemLibrary2(
compile: *Compile,
name: []const u8,
@@ -814,22 +822,26 @@ pub fn linkSystemLibrary2(
return compile.root_module.linkSystemLibrary(name, options);
}
+/// Deprecated; use `c.root_module.linkFramework(name, .{})` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn linkFramework(c: *Compile, name: []const u8) void {
c.root_module.linkFramework(name, .{});
}
-/// Handy when you have many C/C++ source files and want them all to have the same flags.
+/// Deprecated; use `compile.root_module.addCSourceFiles(options)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addCSourceFiles(compile: *Compile, options: Module.AddCSourceFilesOptions) void {
compile.root_module.addCSourceFiles(options);
}
+/// Deprecated; use `compile.root_module.addCSourceFile(source)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addCSourceFile(compile: *Compile, source: Module.CSourceFile) void {
compile.root_module.addCSourceFile(source);
}
-/// Resource files must have the extension `.rc`.
-/// Can be called regardless of target. The .rc file will be ignored
-/// if the target object format does not support embedded resources.
+/// Deprecated; use `compile.root_module.addWin32ResourceFile(source)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addWin32ResourceFile(compile: *Compile, source: Module.RcSourceFile) void {
compile.root_module.addWin32ResourceFile(source);
}
@@ -915,54 +927,80 @@ pub fn getEmittedLlvmBc(compile: *Compile) LazyPath {
return compile.getEmittedFileGeneric(&compile.generated_llvm_bc);
}
+/// Deprecated; use `compile.root_module.addAssemblyFile(source)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addAssemblyFile(compile: *Compile, source: LazyPath) void {
compile.root_module.addAssemblyFile(source);
}
+/// Deprecated; use `compile.root_module.addObjectFile(source)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addObjectFile(compile: *Compile, source: LazyPath) void {
compile.root_module.addObjectFile(source);
}
+/// Deprecated; use `compile.root_module.addObject(object)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addObject(compile: *Compile, object: *Compile) void {
compile.root_module.addObject(object);
}
+/// Deprecated; use `compile.root_module.linkLibrary(library)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn linkLibrary(compile: *Compile, library: *Compile) void {
compile.root_module.linkLibrary(library);
}
+/// Deprecated; use `compile.root_module.addAfterIncludePath(lazy_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addAfterIncludePath(compile: *Compile, lazy_path: LazyPath) void {
compile.root_module.addAfterIncludePath(lazy_path);
}
+/// Deprecated; use `compile.root_module.addSystemIncludePath(lazy_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addSystemIncludePath(compile: *Compile, lazy_path: LazyPath) void {
compile.root_module.addSystemIncludePath(lazy_path);
}
+/// Deprecated; use `compile.root_module.addIncludePath(lazy_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addIncludePath(compile: *Compile, lazy_path: LazyPath) void {
compile.root_module.addIncludePath(lazy_path);
}
+/// Deprecated; use `compile.root_module.addConfigHeader(config_header)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addConfigHeader(compile: *Compile, config_header: *Step.ConfigHeader) void {
compile.root_module.addConfigHeader(config_header);
}
+/// Deprecated; use `compile.root_module.addEmbedPath(lazy_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addEmbedPath(compile: *Compile, lazy_path: LazyPath) void {
compile.root_module.addEmbedPath(lazy_path);
}
+/// Deprecated; use `compile.root_module.addLibraryPath(directory_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addLibraryPath(compile: *Compile, directory_path: LazyPath) void {
compile.root_module.addLibraryPath(directory_path);
}
+/// Deprecated; use `compile.root_module.addRPath(directory_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addRPath(compile: *Compile, directory_path: LazyPath) void {
compile.root_module.addRPath(directory_path);
}
+/// Deprecated; use `compile.root_module.addSystemFrameworkPath(directory_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addSystemFrameworkPath(compile: *Compile, directory_path: LazyPath) void {
compile.root_module.addSystemFrameworkPath(directory_path);
}
+/// Deprecated; use `compile.root_module.addFrameworkPath(directory_path)` instead.
+/// To be removed after 0.15.0 is tagged.
pub fn addFrameworkPath(compile: *Compile, directory_path: LazyPath) void {
compile.root_module.addFrameworkPath(directory_path);
}
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index e35b602e06..57f5d73f0c 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -73,9 +73,12 @@ skip_foreign_checks: bool,
/// external executor (such as qemu) but not fail if the executor is unavailable.
failing_to_execute_foreign_is_an_error: bool,
+/// Deprecated in favor of `stdio_limit`.
+max_stdio_size: usize,
+
/// If stderr or stdout exceeds this amount, the child process is killed and
/// the step fails.
-max_stdio_size: usize,
+stdio_limit: std.Io.Limit,
captured_stdout: ?*Output,
captured_stderr: ?*Output,
@@ -169,7 +172,7 @@ pub const Output = struct {
pub fn create(owner: *std.Build, name: []const u8) *Run {
const run = owner.allocator.create(Run) catch @panic("OOM");
run.* = .{
- .step = Step.init(.{
+ .step = .init(.{
.id = base_id,
.name = name,
.owner = owner,
@@ -186,6 +189,7 @@ pub fn create(owner: *std.Build, name: []const u8) *Run {
.skip_foreign_checks = false,
.failing_to_execute_foreign_is_an_error = true,
.max_stdio_size = 10 * 1024 * 1024,
+ .stdio_limit = .unlimited,
.captured_stdout = null,
.captured_stderr = null,
.dep_output_file = null,
@@ -1011,7 +1015,7 @@ fn populateGeneratedPaths(
}
}
-fn formatTerm(term: ?std.process.Child.Term, w: *std.io.Writer) std.io.Writer.Error!void {
+fn formatTerm(term: ?std.process.Child.Term, w: *std.Io.Writer) std.Io.Writer.Error!void {
if (term) |t| switch (t) {
.Exited => |code| try w.print("exited with code {d}", .{code}),
.Signal => |sig| try w.print("terminated with signal {d}", .{sig}),
@@ -1500,7 +1504,7 @@ fn evalZigTest(
const gpa = run.step.owner.allocator;
const arena = run.step.owner.allocator;
- var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
+ var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
@@ -1524,11 +1528,6 @@ fn evalZigTest(
break :failed false;
};
- const Header = std.zig.Server.Message.Header;
-
- const stdout = poller.fifo(.stdout);
- const stderr = poller.fifo(.stderr);
-
var fail_count: u32 = 0;
var skip_count: u32 = 0;
var leak_count: u32 = 0;
@@ -1541,16 +1540,14 @@ fn evalZigTest(
var sub_prog_node: ?std.Progress.Node = null;
defer if (sub_prog_node) |n| n.end();
+ const stdout = poller.reader(.stdout);
+ const stderr = poller.reader(.stderr);
const any_write_failed = first_write_failed or poll: while (true) {
- while (stdout.readableLength() < @sizeOf(Header)) {
- if (!(try poller.poll())) break :poll false;
- }
- const header = stdout.reader().readStruct(Header) catch unreachable;
- while (stdout.readableLength() < header.bytes_len) {
- if (!(try poller.poll())) break :poll false;
- }
- const body = stdout.readableSliceOfLen(header.bytes_len);
-
+ const Header = std.zig.Server.Message.Header;
+ while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
+ const header = stdout.takeStruct(Header, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll false;
+ const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@@ -1607,9 +1604,9 @@ fn evalZigTest(
if (tr_hdr.flags.fail or tr_hdr.flags.leak or tr_hdr.flags.log_err_count > 0) {
const name = std.mem.sliceTo(md.string_bytes[md.names[tr_hdr.index]..], 0);
- const orig_msg = stderr.readableSlice(0);
- defer stderr.discard(orig_msg.len);
- const msg = std.mem.trim(u8, orig_msg, "\n");
+ const stderr_contents = stderr.buffered();
+ stderr.toss(stderr_contents.len);
+ const msg = std.mem.trim(u8, stderr_contents, "\n");
const label = if (tr_hdr.flags.fail)
"failed"
else if (tr_hdr.flags.leak)
@@ -1660,8 +1657,6 @@ fn evalZigTest(
},
else => {}, // ignore other messages
}
-
- stdout.discard(body.len);
};
if (any_write_failed) {
@@ -1670,9 +1665,9 @@ fn evalZigTest(
while (try poller.poll()) {}
}
- if (stderr.readableLength() > 0) {
- const msg = std.mem.trim(u8, try stderr.toOwnedSlice(), "\n");
- if (msg.len > 0) run.step.result_stderr = msg;
+ const stderr_contents = std.mem.trim(u8, stderr.buffered(), "\n");
+ if (stderr_contents.len > 0) {
+ run.step.result_stderr = try arena.dupe(u8, stderr_contents);
}
// Send EOF to stdin.
@@ -1769,13 +1764,22 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
child.stdin = null;
},
.lazy_path => |lazy_path| {
- const path = lazy_path.getPath2(b, &run.step);
- const file = b.build_root.handle.openFile(path, .{}) catch |err| {
+ const path = lazy_path.getPath3(b, &run.step);
+ const file = path.root_dir.handle.openFile(path.subPathOrDot(), .{}) catch |err| {
return run.step.fail("unable to open stdin file: {s}", .{@errorName(err)});
};
defer file.close();
- child.stdin.?.writeFileAll(file, .{}) catch |err| {
- return run.step.fail("unable to write file to stdin: {s}", .{@errorName(err)});
+ // TODO https://github.com/ziglang/zig/issues/23955
+ var buffer: [1024]u8 = undefined;
+ var file_reader = file.reader(&buffer);
+ var stdin_writer = child.stdin.?.writer(&.{});
+ _ = stdin_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
+ error.ReadFailed => return run.step.fail("failed to read from {f}: {t}", .{
+ path, file_reader.err.?,
+ }),
+ error.WriteFailed => return run.step.fail("failed to write to stdin: {t}", .{
+ stdin_writer.err.?,
+ }),
};
child.stdin.?.close();
child.stdin = null;
@@ -1786,28 +1790,43 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
var stdout_bytes: ?[]const u8 = null;
var stderr_bytes: ?[]const u8 = null;
+ run.stdio_limit = run.stdio_limit.min(.limited(run.max_stdio_size));
if (child.stdout) |stdout| {
if (child.stderr) |stderr| {
- var poller = std.io.poll(arena, enum { stdout, stderr }, .{
+ var poller = std.Io.poll(arena, enum { stdout, stderr }, .{
.stdout = stdout,
.stderr = stderr,
});
defer poller.deinit();
while (try poller.poll()) {
- if (poller.fifo(.stdout).count > run.max_stdio_size)
- return error.StdoutStreamTooLong;
- if (poller.fifo(.stderr).count > run.max_stdio_size)
- return error.StderrStreamTooLong;
+ if (run.stdio_limit.toInt()) |limit| {
+ if (poller.reader(.stderr).buffered().len > limit)
+ return error.StdoutStreamTooLong;
+ if (poller.reader(.stderr).buffered().len > limit)
+ return error.StderrStreamTooLong;
+ }
}
- stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
- stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
+ stdout_bytes = try poller.toOwnedSlice(.stdout);
+ stderr_bytes = try poller.toOwnedSlice(.stderr);
} else {
- stdout_bytes = try stdout.deprecatedReader().readAllAlloc(arena, run.max_stdio_size);
+ var small_buffer: [1]u8 = undefined;
+ var stdout_reader = stdout.readerStreaming(&small_buffer);
+ stdout_bytes = stdout_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ReadFailed => return stdout_reader.err.?,
+ error.StreamTooLong => return error.StdoutStreamTooLong,
+ };
}
} else if (child.stderr) |stderr| {
- stderr_bytes = try stderr.deprecatedReader().readAllAlloc(arena, run.max_stdio_size);
+ var small_buffer: [1]u8 = undefined;
+ var stderr_reader = stderr.readerStreaming(&small_buffer);
+ stderr_bytes = stderr_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ReadFailed => return stderr_reader.err.?,
+ error.StreamTooLong => return error.StderrStreamTooLong,
+ };
}
if (stderr_bytes) |bytes| if (bytes.len > 0) {
diff --git a/lib/std/Io.zig b/lib/std/Io.zig
index ff6966d7f7..1511f0dcad 100644
--- a/lib/std/Io.zig
+++ b/lib/std/Io.zig
@@ -1,16 +1,11 @@
-const std = @import("std.zig");
const builtin = @import("builtin");
-const root = @import("root");
-const c = std.c;
const is_windows = builtin.os.tag == .windows;
+
+const std = @import("std.zig");
const windows = std.os.windows;
const posix = std.posix;
const math = std.math;
const assert = std.debug.assert;
-const fs = std.fs;
-const mem = std.mem;
-const meta = std.meta;
-const File = std.fs.File;
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;
@@ -314,11 +309,11 @@ pub fn GenericReader(
}
/// Helper for bridging to the new `Reader` API while upgrading.
- pub fn adaptToNewApi(self: *const Self) Adapter {
+ pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
return .{
.derp_reader = self.*,
.new_interface = .{
- .buffer = &.{},
+ .buffer = buffer,
.vtable = &.{ .stream = Adapter.stream },
.seek = 0,
.end = 0,
@@ -334,10 +329,12 @@ pub fn GenericReader(
fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
const a: *@This() = @alignCast(@fieldParentPtr("new_interface", r));
const buf = limit.slice(try w.writableSliceGreedy(1));
- return a.derp_reader.read(buf) catch |err| {
+ const n = a.derp_reader.read(buf) catch |err| {
a.err = err;
return error.ReadFailed;
};
+ w.advance(n);
+ return n;
}
};
};
@@ -419,9 +416,14 @@ pub fn GenericWriter(
new_interface: Writer,
err: ?Error = null,
- fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize {
+ fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
_ = splat;
const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
+ const buffered = w.buffered();
+ if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
+ a.err = err;
+ return error.WriteFailed;
+ });
return a.derp_writer.write(data[0]) catch |err| {
a.err = err;
return error.WriteFailed;
@@ -435,54 +437,46 @@ pub fn GenericWriter(
pub const AnyReader = @import("Io/DeprecatedReader.zig");
/// Deprecated in favor of `Writer`.
pub const AnyWriter = @import("Io/DeprecatedWriter.zig");
-
+/// Deprecated in favor of `File.Reader` and `File.Writer`.
pub const SeekableStream = @import("Io/seekable_stream.zig").SeekableStream;
-
+/// Deprecated in favor of `Writer`.
pub const BufferedWriter = @import("Io/buffered_writer.zig").BufferedWriter;
+/// Deprecated in favor of `Writer`.
pub const bufferedWriter = @import("Io/buffered_writer.zig").bufferedWriter;
-
+/// Deprecated in favor of `Reader`.
pub const BufferedReader = @import("Io/buffered_reader.zig").BufferedReader;
+/// Deprecated in favor of `Reader`.
pub const bufferedReader = @import("Io/buffered_reader.zig").bufferedReader;
+/// Deprecated in favor of `Reader`.
pub const bufferedReaderSize = @import("Io/buffered_reader.zig").bufferedReaderSize;
-
+/// Deprecated in favor of `Reader`.
pub const FixedBufferStream = @import("Io/fixed_buffer_stream.zig").FixedBufferStream;
+/// Deprecated in favor of `Reader`.
pub const fixedBufferStream = @import("Io/fixed_buffer_stream.zig").fixedBufferStream;
-
-pub const CWriter = @import("Io/c_writer.zig").CWriter;
-pub const cWriter = @import("Io/c_writer.zig").cWriter;
-
+/// Deprecated in favor of `Reader.Limited`.
pub const LimitedReader = @import("Io/limited_reader.zig").LimitedReader;
+/// Deprecated in favor of `Reader.Limited`.
pub const limitedReader = @import("Io/limited_reader.zig").limitedReader;
-
+/// Deprecated with no replacement; inefficient pattern
pub const CountingWriter = @import("Io/counting_writer.zig").CountingWriter;
+/// Deprecated with no replacement; inefficient pattern
pub const countingWriter = @import("Io/counting_writer.zig").countingWriter;
+/// Deprecated with no replacement; inefficient pattern
pub const CountingReader = @import("Io/counting_reader.zig").CountingReader;
+/// Deprecated with no replacement; inefficient pattern
pub const countingReader = @import("Io/counting_reader.zig").countingReader;
-pub const MultiWriter = @import("Io/multi_writer.zig").MultiWriter;
-pub const multiWriter = @import("Io/multi_writer.zig").multiWriter;
-
pub const BitReader = @import("Io/bit_reader.zig").BitReader;
pub const bitReader = @import("Io/bit_reader.zig").bitReader;
pub const BitWriter = @import("Io/bit_writer.zig").BitWriter;
pub const bitWriter = @import("Io/bit_writer.zig").bitWriter;
-pub const ChangeDetectionStream = @import("Io/change_detection_stream.zig").ChangeDetectionStream;
-pub const changeDetectionStream = @import("Io/change_detection_stream.zig").changeDetectionStream;
-
-pub const FindByteWriter = @import("Io/find_byte_writer.zig").FindByteWriter;
-pub const findByteWriter = @import("Io/find_byte_writer.zig").findByteWriter;
-
-pub const BufferedAtomicFile = @import("Io/buffered_atomic_file.zig").BufferedAtomicFile;
-
-pub const StreamSource = @import("Io/stream_source.zig").StreamSource;
-
pub const tty = @import("Io/tty.zig");
-/// A Writer that doesn't write to anything.
+/// Deprecated in favor of `Writer.Discarding`.
pub const null_writer: NullWriter = .{ .context = {} };
-
+/// Deprecated in favor of `Writer.Discarding`.
pub const NullWriter = GenericWriter(void, error{}, dummyWrite);
fn dummyWrite(context: void, data: []const u8) error{}!usize {
_ = context;
@@ -494,54 +488,51 @@ test null_writer {
}
pub fn poll(
- allocator: Allocator,
+ gpa: Allocator,
comptime StreamEnum: type,
files: PollFiles(StreamEnum),
) Poller(StreamEnum) {
const enum_fields = @typeInfo(StreamEnum).@"enum".fields;
- var result: Poller(StreamEnum) = undefined;
-
- if (is_windows) result.windows = .{
- .first_read_done = false,
- .overlapped = [1]windows.OVERLAPPED{
- mem.zeroes(windows.OVERLAPPED),
- } ** enum_fields.len,
- .small_bufs = undefined,
- .active = .{
- .count = 0,
- .handles_buf = undefined,
- .stream_map = undefined,
- },
+ var result: Poller(StreamEnum) = .{
+ .gpa = gpa,
+ .readers = @splat(.failing),
+ .poll_fds = undefined,
+ .windows = if (is_windows) .{
+ .first_read_done = false,
+ .overlapped = [1]windows.OVERLAPPED{
+ std.mem.zeroes(windows.OVERLAPPED),
+ } ** enum_fields.len,
+ .small_bufs = undefined,
+ .active = .{
+ .count = 0,
+ .handles_buf = undefined,
+ .stream_map = undefined,
+ },
+ } else {},
};
- inline for (0..enum_fields.len) |i| {
- result.fifos[i] = .{
- .allocator = allocator,
- .buf = &.{},
- .head = 0,
- .count = 0,
- };
+ inline for (enum_fields, 0..) |field, i| {
if (is_windows) {
- result.windows.active.handles_buf[i] = @field(files, enum_fields[i].name).handle;
+ result.windows.active.handles_buf[i] = @field(files, field.name).handle;
} else {
result.poll_fds[i] = .{
- .fd = @field(files, enum_fields[i].name).handle,
+ .fd = @field(files, field.name).handle,
.events = posix.POLL.IN,
.revents = undefined,
};
}
}
+
return result;
}
-pub const PollFifo = std.fifo.LinearFifo(u8, .Dynamic);
-
pub fn Poller(comptime StreamEnum: type) type {
return struct {
const enum_fields = @typeInfo(StreamEnum).@"enum".fields;
const PollFd = if (is_windows) void else posix.pollfd;
- fifos: [enum_fields.len]PollFifo,
+ gpa: Allocator,
+ readers: [enum_fields.len]Reader,
poll_fds: [enum_fields.len]PollFd,
windows: if (is_windows) struct {
first_read_done: bool,
@@ -553,7 +544,7 @@ pub fn Poller(comptime StreamEnum: type) type {
stream_map: [enum_fields.len]StreamEnum,
pub fn removeAt(self: *@This(), index: u32) void {
- std.debug.assert(index < self.count);
+ assert(index < self.count);
for (index + 1..self.count) |i| {
self.handles_buf[i - 1] = self.handles_buf[i];
self.stream_map[i - 1] = self.stream_map[i];
@@ -566,13 +557,14 @@ pub fn Poller(comptime StreamEnum: type) type {
const Self = @This();
pub fn deinit(self: *Self) void {
+ const gpa = self.gpa;
if (is_windows) {
// cancel any pending IO to prevent clobbering OVERLAPPED value
for (self.windows.active.handles_buf[0..self.windows.active.count]) |h| {
_ = windows.kernel32.CancelIo(h);
}
}
- inline for (&self.fifos) |*q| q.deinit();
+ inline for (&self.readers) |*r| gpa.free(r.buffer);
self.* = undefined;
}
@@ -592,21 +584,40 @@ pub fn Poller(comptime StreamEnum: type) type {
}
}
- pub inline fn fifo(self: *Self, comptime which: StreamEnum) *PollFifo {
- return &self.fifos[@intFromEnum(which)];
+ pub fn reader(self: *Self, which: StreamEnum) *Reader {
+ return &self.readers[@intFromEnum(which)];
+ }
+
+ pub fn toOwnedSlice(self: *Self, which: StreamEnum) error{OutOfMemory}![]u8 {
+ const gpa = self.gpa;
+ const r = reader(self, which);
+ if (r.seek == 0) {
+ const new = try gpa.realloc(r.buffer, r.end);
+ r.buffer = &.{};
+ r.end = 0;
+ return new;
+ }
+ const new = try gpa.dupe(u8, r.buffered());
+ gpa.free(r.buffer);
+ r.buffer = &.{};
+ r.seek = 0;
+ r.end = 0;
+ return new;
}
fn pollWindows(self: *Self, nanoseconds: ?u64) !bool {
const bump_amt = 512;
+ const gpa = self.gpa;
if (!self.windows.first_read_done) {
var already_read_data = false;
for (0..enum_fields.len) |i| {
const handle = self.windows.active.handles_buf[i];
switch (try windowsAsyncReadToFifoAndQueueSmallRead(
+ gpa,
handle,
&self.windows.overlapped[i],
- &self.fifos[i],
+ &self.readers[i],
&self.windows.small_bufs[i],
bump_amt,
)) {
@@ -653,7 +664,7 @@ pub fn Poller(comptime StreamEnum: type) type {
const handle = self.windows.active.handles_buf[active_idx];
const overlapped = &self.windows.overlapped[stream_idx];
- const stream_fifo = &self.fifos[stream_idx];
+ const stream_reader = &self.readers[stream_idx];
const small_buf = &self.windows.small_bufs[stream_idx];
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
@@ -664,12 +675,16 @@ pub fn Poller(comptime StreamEnum: type) type {
},
.aborted => unreachable,
};
- try stream_fifo.write(small_buf[0..num_bytes_read]);
+ const buf = small_buf[0..num_bytes_read];
+ const dest = try writableSliceGreedyAlloc(stream_reader, gpa, buf.len);
+ @memcpy(dest[0..buf.len], buf);
+ advanceBufferEnd(stream_reader, buf.len);
switch (try windowsAsyncReadToFifoAndQueueSmallRead(
+ gpa,
handle,
overlapped,
- stream_fifo,
+ stream_reader,
small_buf,
bump_amt,
)) {
@@ -684,6 +699,7 @@ pub fn Poller(comptime StreamEnum: type) type {
}
fn pollPosix(self: *Self, nanoseconds: ?u64) !bool {
+ const gpa = self.gpa;
// We ask for ensureUnusedCapacity with this much extra space. This
// has more of an effect on small reads because once the reads
// start to get larger the amount of space an ArrayList will
@@ -703,18 +719,18 @@ pub fn Poller(comptime StreamEnum: type) type {
}
var keep_polling = false;
- inline for (&self.poll_fds, &self.fifos) |*poll_fd, *q| {
+ for (&self.poll_fds, &self.readers) |*poll_fd, *r| {
// Try reading whatever is available before checking the error
// conditions.
// It's still possible to read after a POLL.HUP is received,
// always check if there's some data waiting to be read first.
if (poll_fd.revents & posix.POLL.IN != 0) {
- const buf = try q.writableWithSize(bump_amt);
+ const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt);
const amt = posix.read(poll_fd.fd, buf) catch |err| switch (err) {
error.BrokenPipe => 0, // Handle the same as EOF.
else => |e| return e,
};
- q.update(amt);
+ advanceBufferEnd(r, amt);
if (amt == 0) {
// Remove the fd when the EOF condition is met.
poll_fd.fd = -1;
@@ -730,146 +746,181 @@ pub fn Poller(comptime StreamEnum: type) type {
}
return keep_polling;
}
- };
-}
-/// The `ReadFile` docuementation states that `lpNumberOfBytesRead` does not have a meaningful
-/// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For
-/// compatibility, we point it to this dummy variables, which we never otherwise access.
-/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
-var win_dummy_bytes_read: u32 = undefined;
-
-/// Read as much data as possible from `handle` with `overlapped`, and write it to the FIFO. Before
-/// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data
-/// is available. `handle` must have no pending asynchronous operation.
-fn windowsAsyncReadToFifoAndQueueSmallRead(
- handle: windows.HANDLE,
- overlapped: *windows.OVERLAPPED,
- fifo: *PollFifo,
- small_buf: *[128]u8,
- bump_amt: usize,
-) !enum { empty, populated, closed_populated, closed } {
- var read_any_data = false;
- while (true) {
- const fifo_read_pending = while (true) {
- const buf = try fifo.writableWithSize(bump_amt);
- const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);
-
- if (0 == windows.kernel32.ReadFile(
- handle,
- buf.ptr,
- buf_len,
- &win_dummy_bytes_read,
- overlapped,
- )) switch (windows.GetLastError()) {
- .IO_PENDING => break true,
- .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
- else => |err| return windows.unexpectedError(err),
- };
+ /// Returns a slice into the unused capacity of `buffer` with at least
+ /// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
+ ///
+ /// After calling this function, typically the caller will follow up with a
+ /// call to `advanceBufferEnd` to report the actual number of bytes buffered.
+ fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
+ {
+ const unused = r.buffer[r.end..];
+ if (unused.len >= min_len) return unused;
+ }
+ if (r.seek > 0) r.rebase(r.buffer.len) catch unreachable;
+ {
+ var list: std.ArrayListUnmanaged(u8) = .{
+ .items = r.buffer[0..r.end],
+ .capacity = r.buffer.len,
+ };
+ defer r.buffer = list.allocatedSlice();
+ try list.ensureUnusedCapacity(allocator, min_len);
+ }
+ const unused = r.buffer[r.end..];
+ assert(unused.len >= min_len);
+ return unused;
+ }
+
+ /// After writing directly into the unused capacity of `buffer`, this function
+ /// updates `end` so that users of `Reader` can receive the data.
+ fn advanceBufferEnd(r: *Reader, n: usize) void {
+ assert(n <= r.buffer.len - r.end);
+ r.end += n;
+ }
+
+ /// The `ReadFile` documentation states that `lpNumberOfBytesRead` does not have a meaningful
+ /// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For
+ /// compatibility, we point it to this dummy variable, which we never otherwise access.
+ /// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
+ var win_dummy_bytes_read: u32 = undefined;
+
+ /// Read as much data as possible from `handle` with `overlapped`, and write it to the reader's buffer. Before
+ /// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data
+ /// is available. `handle` must have no pending asynchronous operation.
+ fn windowsAsyncReadToFifoAndQueueSmallRead(
+ gpa: Allocator,
+ handle: windows.HANDLE,
+ overlapped: *windows.OVERLAPPED,
+ r: *Reader,
+ small_buf: *[128]u8,
+ bump_amt: usize,
+ ) !enum { empty, populated, closed_populated, closed } {
+ var read_any_data = false;
+ while (true) {
+ const fifo_read_pending = while (true) {
+ const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt);
+ const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);
- const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
- .success => |n| n,
- .closed => return if (read_any_data) .closed_populated else .closed,
- .aborted => unreachable,
- };
+ if (0 == windows.kernel32.ReadFile(
+ handle,
+ buf.ptr,
+ buf_len,
+ &win_dummy_bytes_read,
+ overlapped,
+ )) switch (windows.GetLastError()) {
+ .IO_PENDING => break true,
+ .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
+ else => |err| return windows.unexpectedError(err),
+ };
- read_any_data = true;
- fifo.update(num_bytes_read);
+ const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
+ .success => |n| n,
+ .closed => return if (read_any_data) .closed_populated else .closed,
+ .aborted => unreachable,
+ };
- if (num_bytes_read == buf_len) {
- // We filled the buffer, so there's probably more data available.
- continue;
- } else {
- // We didn't fill the buffer, so assume we're out of data.
- // There is no pending read.
- break false;
- }
- };
+ read_any_data = true;
+ advanceBufferEnd(r, num_bytes_read);
- if (fifo_read_pending) cancel_read: {
- // Cancel the pending read into the FIFO.
- _ = windows.kernel32.CancelIo(handle);
+ if (num_bytes_read == buf_len) {
+ // We filled the buffer, so there's probably more data available.
+ continue;
+ } else {
+ // We didn't fill the buffer, so assume we're out of data.
+ // There is no pending read.
+ break false;
+ }
+ };
- // We have to wait for the handle to be signalled, i.e. for the cancellation to complete.
- switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) {
- windows.WAIT_OBJECT_0 => {},
- windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()),
- else => unreachable,
- }
+ if (fifo_read_pending) cancel_read: {
+ // Cancel the pending read into the reader's buffer.
+ _ = windows.kernel32.CancelIo(handle);
- // If it completed before we canceled, make sure to tell the FIFO!
- const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) {
- .success => |n| n,
- .closed => return if (read_any_data) .closed_populated else .closed,
- .aborted => break :cancel_read,
- };
- read_any_data = true;
- fifo.update(num_bytes_read);
- }
-
- // Try to queue the 1-byte read.
- if (0 == windows.kernel32.ReadFile(
- handle,
- small_buf,
- small_buf.len,
- &win_dummy_bytes_read,
- overlapped,
- )) switch (windows.GetLastError()) {
- .IO_PENDING => {
- // 1-byte read pending as intended
- return if (read_any_data) .populated else .empty;
- },
- .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
- else => |err| return windows.unexpectedError(err),
- };
+ // We have to wait for the handle to be signalled, i.e. for the cancellation to complete.
+ switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) {
+ windows.WAIT_OBJECT_0 => {},
+ windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()),
+ else => unreachable,
+ }
- // We got data back this time. Write it to the FIFO and run the main loop again.
- const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
- .success => |n| n,
- .closed => return if (read_any_data) .closed_populated else .closed,
- .aborted => unreachable,
- };
- try fifo.write(small_buf[0..num_bytes_read]);
- read_any_data = true;
- }
-}
+ // If it completed before we canceled, make sure to record the data in the reader!
+ const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) {
+ .success => |n| n,
+ .closed => return if (read_any_data) .closed_populated else .closed,
+ .aborted => break :cancel_read,
+ };
+ read_any_data = true;
+ advanceBufferEnd(r, num_bytes_read);
+ }
-/// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation.
-/// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected).
-///
-/// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the
-/// operation immediately returns data:
-/// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially
-/// erroneous results."
-/// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...]
-/// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to
-/// get the actual number of bytes read."
-/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
-fn windowsGetReadResult(
- handle: windows.HANDLE,
- overlapped: *windows.OVERLAPPED,
- allow_aborted: bool,
-) !union(enum) {
- success: u32,
- closed,
- aborted,
-} {
- var num_bytes_read: u32 = undefined;
- if (0 == windows.kernel32.GetOverlappedResult(
- handle,
- overlapped,
- &num_bytes_read,
- 0,
- )) switch (windows.GetLastError()) {
- .BROKEN_PIPE => return .closed,
- .OPERATION_ABORTED => |err| if (allow_aborted) {
- return .aborted;
- } else {
- return windows.unexpectedError(err);
- },
- else => |err| return windows.unexpectedError(err),
+ // Try to queue the 1-byte read.
+ if (0 == windows.kernel32.ReadFile(
+ handle,
+ small_buf,
+ small_buf.len,
+ &win_dummy_bytes_read,
+ overlapped,
+ )) switch (windows.GetLastError()) {
+ .IO_PENDING => {
+ // 1-byte read pending as intended
+ return if (read_any_data) .populated else .empty;
+ },
+ .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
+ else => |err| return windows.unexpectedError(err),
+ };
+
+ // We got data back this time. Write it to the reader's buffer and run the main loop again.
+ const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
+ .success => |n| n,
+ .closed => return if (read_any_data) .closed_populated else .closed,
+ .aborted => unreachable,
+ };
+ const buf = small_buf[0..num_bytes_read];
+ const dest = try writableSliceGreedyAlloc(r, gpa, buf.len);
+ @memcpy(dest[0..buf.len], buf);
+ advanceBufferEnd(r, buf.len);
+ read_any_data = true;
+ }
+ }
+
+ /// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation.
+ /// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected).
+ ///
+ /// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the
+ /// operation immediately returns data:
+ /// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially
+ /// erroneous results."
+ /// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...]
+ /// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to
+ /// get the actual number of bytes read."
+ /// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
+ fn windowsGetReadResult(
+ handle: windows.HANDLE,
+ overlapped: *windows.OVERLAPPED,
+ allow_aborted: bool,
+ ) !union(enum) {
+ success: u32,
+ closed,
+ aborted,
+ } {
+ var num_bytes_read: u32 = undefined;
+ if (0 == windows.kernel32.GetOverlappedResult(
+ handle,
+ overlapped,
+ &num_bytes_read,
+ 0,
+ )) switch (windows.GetLastError()) {
+ .BROKEN_PIPE => return .closed,
+ .OPERATION_ABORTED => |err| if (allow_aborted) {
+ return .aborted;
+ } else {
+ return windows.unexpectedError(err);
+ },
+ else => |err| return windows.unexpectedError(err),
+ };
+ return .{ .success = num_bytes_read };
+ }
};
- return .{ .success = num_bytes_read };
}
/// Given an enum, returns a struct with fields of that enum, each field
@@ -880,10 +931,10 @@ pub fn PollFiles(comptime StreamEnum: type) type {
for (&struct_fields, enum_fields) |*struct_field, enum_field| {
struct_field.* = .{
.name = enum_field.name,
- .type = fs.File,
+ .type = std.fs.File,
.default_value_ptr = null,
.is_comptime = false,
- .alignment = @alignOf(fs.File),
+ .alignment = @alignOf(std.fs.File),
};
}
return @Type(.{ .@"struct" = .{
@@ -898,16 +949,14 @@ test {
_ = Reader;
_ = Reader.Limited;
_ = Writer;
- _ = @import("Io/bit_reader.zig");
- _ = @import("Io/bit_writer.zig");
- _ = @import("Io/buffered_atomic_file.zig");
- _ = @import("Io/buffered_reader.zig");
- _ = @import("Io/buffered_writer.zig");
- _ = @import("Io/c_writer.zig");
- _ = @import("Io/counting_writer.zig");
- _ = @import("Io/counting_reader.zig");
- _ = @import("Io/fixed_buffer_stream.zig");
- _ = @import("Io/seekable_stream.zig");
- _ = @import("Io/stream_source.zig");
+ _ = BitReader;
+ _ = BitWriter;
+ _ = BufferedReader;
+ _ = BufferedWriter;
+ _ = CountingWriter;
+ _ = CountingReader;
+ _ = FixedBufferStream;
+ _ = SeekableStream;
+ _ = tty;
_ = @import("Io/test.zig");
}
diff --git a/lib/std/Io/DeprecatedReader.zig b/lib/std/Io/DeprecatedReader.zig
index f6cb9f61d5..af1eda8415 100644
--- a/lib/std/Io/DeprecatedReader.zig
+++ b/lib/std/Io/DeprecatedReader.zig
@@ -373,11 +373,11 @@ pub fn discard(self: Self) anyerror!u64 {
}
/// Helper for bridging to the new `Reader` API while upgrading.
-pub fn adaptToNewApi(self: *const Self) Adapter {
+pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
return .{
.derp_reader = self.*,
.new_interface = .{
- .buffer = &.{},
+ .buffer = buffer,
.vtable = &.{ .stream = Adapter.stream },
.seek = 0,
.end = 0,
@@ -393,10 +393,12 @@ pub const Adapter = struct {
fn stream(r: *std.io.Reader, w: *std.io.Writer, limit: std.io.Limit) std.io.Reader.StreamError!usize {
const a: *@This() = @alignCast(@fieldParentPtr("new_interface", r));
const buf = limit.slice(try w.writableSliceGreedy(1));
- return a.derp_reader.read(buf) catch |err| {
+ const n = a.derp_reader.read(buf) catch |err| {
a.err = err;
return error.ReadFailed;
};
+ w.advance(n);
+ return n;
}
};
diff --git a/lib/std/Io/DeprecatedWriter.zig b/lib/std/Io/DeprecatedWriter.zig
index 391b985357..81774b357c 100644
--- a/lib/std/Io/DeprecatedWriter.zig
+++ b/lib/std/Io/DeprecatedWriter.zig
@@ -100,7 +100,12 @@ pub const Adapter = struct {
fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
_ = splat;
- const a: *@This() = @fieldParentPtr("new_interface", w);
+ const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
+ const buffered = w.buffered();
+ if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
+ a.err = err;
+ return error.WriteFailed;
+ });
return a.derp_writer.write(data[0]) catch |err| {
a.err = err;
return error.WriteFailed;
diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig
index f25e113522..da9e01dd2c 100644
--- a/lib/std/Io/Reader.zig
+++ b/lib/std/Io/Reader.zig
@@ -67,6 +67,18 @@ pub const VTable = struct {
///
/// This function is only called when `buffer` is empty.
discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,
+
+ /// Ensures `capacity` more data can be buffered without rebasing.
+ ///
+ /// Asserts `capacity` is within buffer capacity, or that the stream ends
+ /// within `capacity` bytes.
+ ///
+ /// Only called when `capacity` cannot fit into the unused capacity of
+ /// `buffer`.
+ ///
+ /// The default implementation moves buffered data to the start of
+ /// `buffer`, setting `seek` to zero, and cannot fail.
+ rebase: *const fn (r: *Reader, capacity: usize) RebaseError!void = defaultRebase,
};
pub const StreamError = error{
@@ -97,6 +109,10 @@ pub const ShortError = error{
ReadFailed,
};
+pub const RebaseError = error{
+ EndOfStream,
+};
+
pub const failing: Reader = .{
.vtable = &.{
.stream = failingStream,
@@ -122,6 +138,7 @@ pub fn fixed(buffer: []const u8) Reader {
.vtable = &.{
.stream = endingStream,
.discard = endingDiscard,
+ .rebase = endingRebase,
},
// This cast is safe because all potential writes to it will instead
// return `error.EndOfStream`.
@@ -179,6 +196,38 @@ pub fn streamExact(r: *Reader, w: *Writer, n: usize) StreamError!void {
while (remaining != 0) remaining -= try r.stream(w, .limited(remaining));
}
+/// "Pump" exactly `n` bytes from the reader to the writer.
+pub fn streamExact64(r: *Reader, w: *Writer, n: u64) StreamError!void {
+ var remaining = n;
+ while (remaining != 0) remaining -= try r.stream(w, .limited64(remaining));
+}
+
+/// "Pump" exactly `n` bytes from the reader to the writer.
+///
+/// When draining `w`, ensures that at least `preserve_len` bytes remain
+/// buffered.
+///
+/// Asserts `Writer.buffer` capacity exceeds `preserve_len`.
+pub fn streamExactPreserve(r: *Reader, w: *Writer, preserve_len: usize, n: usize) StreamError!void {
+ if (w.end + n <= w.buffer.len) {
+ @branchHint(.likely);
+ return streamExact(r, w, n);
+ }
+ // If `n` is large, we can ignore `preserve_len` up to a point.
+ var remaining = n;
+ while (remaining > preserve_len) {
+ assert(remaining != 0);
+ remaining -= try r.stream(w, .limited(remaining - preserve_len));
+ if (w.end + remaining <= w.buffer.len) return streamExact(r, w, remaining);
+ }
+ // All the next bytes received must be preserved.
+ if (preserve_len < w.end) {
+ @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]);
+ w.end = preserve_len;
+ }
+ return streamExact(r, w, remaining);
+}
+
/// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
/// a success case.
///
@@ -234,7 +283,7 @@ pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocErro
/// such case, the next byte that would be read will be the first one to exceed
/// `limit`, and all preceeding bytes have been appended to `list`.
///
-/// Asserts `buffer` has nonzero capacity.
+/// If `limit` is not `Limit.unlimited`, asserts `buffer` has nonzero capacity.
///
/// See also:
/// * `allocRemaining`
@@ -245,7 +294,7 @@ pub fn appendRemaining(
list: *std.ArrayListAlignedUnmanaged(u8, alignment),
limit: Limit,
) LimitedAllocError!void {
- assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data.
+ if (limit != .unlimited) assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data.
const buffer_contents = r.buffer[r.seek..r.end];
const copy_len = limit.minInt(buffer_contents.len);
try list.appendSlice(gpa, r.buffer[0..copy_len]);
@@ -748,11 +797,8 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
@branchHint(.likely);
return buffer[seek .. end + 1];
}
- if (r.vtable.stream == &endingStream) {
- // Protect the `@constCast` of `fixed`.
- return error.EndOfStream;
- }
- r.rebase();
+ // TODO take a parameter for max search length rather than relying on buffer capacity
+ try rebase(r, r.buffer.len);
while (r.buffer.len - r.end != 0) {
const end_cap = r.buffer[r.end..];
var writer: Writer = .fixed(end_cap);
@@ -1018,11 +1064,7 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void {
};
if (r.seek + n <= r.end) return;
};
- if (r.vtable.stream == &endingStream) {
- // Protect the `@constCast` of `fixed`.
- return error.EndOfStream;
- }
- rebaseCapacity(r, n);
+ try rebase(r, n);
var writer: Writer = .{
.buffer = r.buffer,
.vtable = &.{ .drain = Writer.fixedDrain },
@@ -1042,7 +1084,7 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void {
///
/// Asserts buffer capacity is at least 1.
pub fn fillMore(r: *Reader) Error!void {
- rebaseCapacity(r, 1);
+ try rebase(r, 1);
var writer: Writer = .{
.buffer = r.buffer,
.end = r.end,
@@ -1219,7 +1261,7 @@ pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
pub fn expandTotalCapacity(r: *Reader, allocator: Allocator, n: usize) Allocator.Error!void {
if (n <= r.buffer.len) return;
- if (r.seek > 0) rebase(r);
+ if (r.seek > 0) rebase(r, r.buffer.len);
var list: ArrayList(u8) = .{
.items = r.buffer[0..r.end],
.capacity = r.buffer.len,
@@ -1235,37 +1277,6 @@ pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void
return fill(r, n);
}
-/// Returns a slice into the unused capacity of `buffer` with at least
-/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
-///
-/// After calling this function, typically the caller will follow up with a
-/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
-pub fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
- {
- const unused = r.buffer[r.end..];
- if (unused.len >= min_len) return unused;
- }
- if (r.seek > 0) rebase(r);
- {
- var list: ArrayList(u8) = .{
- .items = r.buffer[0..r.end],
- .capacity = r.buffer.len,
- };
- defer r.buffer = list.allocatedSlice();
- try list.ensureUnusedCapacity(allocator, min_len);
- }
- const unused = r.buffer[r.end..];
- assert(unused.len >= min_len);
- return unused;
-}
-
-/// After writing directly into the unused capacity of `buffer`, this function
-/// updates `end` so that users of `Reader` can receive the data.
-pub fn advanceBufferEnd(r: *Reader, n: usize) void {
- assert(n <= r.buffer.len - r.end);
- r.end += n;
-}
-
fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
const result_info = @typeInfo(Result).int;
comptime assert(result_info.bits % 7 == 0);
@@ -1296,37 +1307,20 @@ fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Resu
}
}
-/// Left-aligns data such that `r.seek` becomes zero.
-///
-/// If `r.seek` is not already zero then `buffer` is mutated, making it illegal
-/// to call this function with a const-casted `buffer`, such as in the case of
-/// `fixed`. This issue can be avoided:
-/// * in implementations, by attempting a read before a rebase, in which
-/// case the read will return `error.EndOfStream`, preventing the rebase.
-/// * in usage, by copying into a mutable buffer before initializing `fixed`.
-pub fn rebase(r: *Reader) void {
- if (r.seek == 0) return;
+/// Ensures `capacity` more data can be buffered without rebasing.
+pub fn rebase(r: *Reader, capacity: usize) RebaseError!void {
+ if (r.end + capacity <= r.buffer.len) return;
+ return r.vtable.rebase(r, capacity);
+}
+
+pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void {
+ if (r.end <= r.buffer.len - capacity) return;
const data = r.buffer[r.seek..r.end];
@memmove(r.buffer[0..data.len], data);
r.seek = 0;
r.end = data.len;
}
-/// Ensures `capacity` more data can be buffered without rebasing, by rebasing
-/// if necessary.
-///
-/// Asserts `capacity` is within the buffer capacity.
-///
-/// If the rebase occurs then `buffer` is mutated, making it illegal to call
-/// this function with a const-casted `buffer`, such as in the case of `fixed`.
-/// This issue can be avoided:
-/// * in implementations, by attempting a read before a rebase, in which
-/// case the read will return `error.EndOfStream`, preventing the rebase.
-/// * in usage, by copying into a mutable buffer before initializing `fixed`.
-pub fn rebaseCapacity(r: *Reader, capacity: usize) void {
- if (r.end > r.buffer.len - capacity) rebase(r);
-}
-
/// Advances the stream and decreases the size of the storage buffer by `n`,
/// returning the range of bytes no longer accessible by `r`.
///
@@ -1682,6 +1676,12 @@ fn endingDiscard(r: *Reader, limit: Limit) Error!usize {
return error.EndOfStream;
}
+fn endingRebase(r: *Reader, capacity: usize) RebaseError!void {
+ _ = r;
+ _ = capacity;
+ return error.EndOfStream;
+}
+
fn failingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
_ = r;
_ = w;
diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig
index 0723073592..06a6534071 100644
--- a/lib/std/Io/Writer.zig
+++ b/lib/std/Io/Writer.zig
@@ -256,10 +256,10 @@ test "fixed buffer flush" {
try testing.expectEqual(10, buffer[0]);
}
-/// Calls `VTable.drain` but hides the last `preserve_length` bytes from the
+/// Calls `VTable.drain` but hides the last `preserve_len` bytes from the
/// implementation, keeping them buffered.
-pub fn drainPreserve(w: *Writer, preserve_length: usize) Error!void {
- const temp_end = w.end -| preserve_length;
+pub fn drainPreserve(w: *Writer, preserve_len: usize) Error!void {
+ const temp_end = w.end -| preserve_len;
const preserved = w.buffer[temp_end..w.end];
w.end = temp_end;
defer w.end += preserved.len;
@@ -310,24 +310,38 @@ pub fn writableSliceGreedy(w: *Writer, minimum_length: usize) Error![]u8 {
}
/// Asserts the provided buffer has total capacity enough for `minimum_length`
-/// and `preserve_length` combined.
+/// and `preserve_len` combined.
///
/// Does not `advance` the buffer end position.
///
-/// When draining the buffer, ensures that at least `preserve_length` bytes
+/// When draining the buffer, ensures that at least `preserve_len` bytes
/// remain buffered.
///
-/// If `preserve_length` is zero, this is equivalent to `writableSliceGreedy`.
-pub fn writableSliceGreedyPreserve(w: *Writer, preserve_length: usize, minimum_length: usize) Error![]u8 {
- assert(w.buffer.len >= preserve_length + minimum_length);
+/// If `preserve_len` is zero, this is equivalent to `writableSliceGreedy`.
+pub fn writableSliceGreedyPreserve(w: *Writer, preserve_len: usize, minimum_length: usize) Error![]u8 {
+ assert(w.buffer.len >= preserve_len + minimum_length);
while (w.buffer.len - w.end < minimum_length) {
- try drainPreserve(w, preserve_length);
+ try drainPreserve(w, preserve_len);
} else {
@branchHint(.likely);
return w.buffer[w.end..];
}
}
+/// Asserts the provided buffer has total capacity enough for `len`.
+///
+/// Advances the buffer end position by `len`.
+///
+/// When draining the buffer, ensures that at least `preserve_len` bytes
+/// remain buffered.
+///
+/// If `preserve_len` is zero, this is equivalent to `writableSlice`.
+pub fn writableSlicePreserve(w: *Writer, preserve_len: usize, len: usize) Error![]u8 {
+ const big_slice = try w.writableSliceGreedyPreserve(preserve_len, len);
+ advance(w, len);
+ return big_slice[0..len];
+}
+
pub const WritableVectorIterator = struct {
first: []u8,
middle: []const []u8 = &.{},
@@ -523,16 +537,16 @@ pub fn write(w: *Writer, bytes: []const u8) Error!usize {
return w.vtable.drain(w, &.{bytes}, 1);
}
-/// Asserts `buffer` capacity exceeds `preserve_length`.
-pub fn writePreserve(w: *Writer, preserve_length: usize, bytes: []const u8) Error!usize {
- assert(preserve_length <= w.buffer.len);
+/// Asserts `buffer` capacity exceeds `preserve_len`.
+pub fn writePreserve(w: *Writer, preserve_len: usize, bytes: []const u8) Error!usize {
+ assert(preserve_len <= w.buffer.len);
if (w.end + bytes.len <= w.buffer.len) {
@branchHint(.likely);
@memcpy(w.buffer[w.end..][0..bytes.len], bytes);
w.end += bytes.len;
return bytes.len;
}
- const temp_end = w.end -| preserve_length;
+ const temp_end = w.end -| preserve_len;
const preserved = w.buffer[temp_end..w.end];
w.end = temp_end;
defer w.end += preserved.len;
@@ -552,13 +566,13 @@ pub fn writeAll(w: *Writer, bytes: []const u8) Error!void {
/// Calls `drain` as many times as necessary such that all of `bytes` are
/// transferred.
///
-/// When draining the buffer, ensures that at least `preserve_length` bytes
+/// When draining the buffer, ensures that at least `preserve_len` bytes
/// remain buffered.
///
-/// Asserts `buffer` capacity exceeds `preserve_length`.
-pub fn writeAllPreserve(w: *Writer, preserve_length: usize, bytes: []const u8) Error!void {
+/// Asserts `buffer` capacity exceeds `preserve_len`.
+pub fn writeAllPreserve(w: *Writer, preserve_len: usize, bytes: []const u8) Error!void {
var index: usize = 0;
- while (index < bytes.len) index += try w.writePreserve(preserve_length, bytes[index..]);
+ while (index < bytes.len) index += try w.writePreserve(preserve_len, bytes[index..]);
}
/// Renders fmt string with args, calling `writer` with slices of bytes.
@@ -761,11 +775,11 @@ pub fn writeByte(w: *Writer, byte: u8) Error!void {
}
}
-/// When draining the buffer, ensures that at least `preserve_length` bytes
+/// When draining the buffer, ensures that at least `preserve_len` bytes
/// remain buffered.
-pub fn writeBytePreserve(w: *Writer, preserve_length: usize, byte: u8) Error!void {
+pub fn writeBytePreserve(w: *Writer, preserve_len: usize, byte: u8) Error!void {
while (w.buffer.len - w.end == 0) {
- try drainPreserve(w, preserve_length);
+ try drainPreserve(w, preserve_len);
} else {
@branchHint(.likely);
w.buffer[w.end] = byte;
@@ -788,10 +802,42 @@ test splatByteAll {
try testing.expectEqualStrings("7" ** 45, aw.writer.buffered());
}
+pub fn splatBytePreserve(w: *Writer, preserve_len: usize, byte: u8, n: usize) Error!void {
+ const new_end = w.end + n;
+ if (new_end <= w.buffer.len) {
+ @memset(w.buffer[w.end..][0..n], byte);
+ w.end = new_end;
+ return;
+ }
+ // If `n` is large, we can ignore `preserve_len` up to a point.
+ var remaining = n;
+ while (remaining > preserve_len) {
+ assert(remaining != 0);
+ remaining -= try splatByte(w, byte, remaining - preserve_len);
+ if (w.end + remaining <= w.buffer.len) {
+ @memset(w.buffer[w.end..][0..remaining], byte);
+ w.end += remaining;
+ return;
+ }
+ }
+ // All the next bytes received must be preserved.
+ if (preserve_len < w.end) {
+ @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]);
+ w.end = preserve_len;
+ }
+ while (remaining > 0) remaining -= try w.splatByte(byte, remaining);
+}
+
/// Writes the same byte many times, allowing short writes.
///
/// Does maximum of one underlying `VTable.drain`.
pub fn splatByte(w: *Writer, byte: u8, n: usize) Error!usize {
+ if (w.end + n <= w.buffer.len) {
+ @branchHint(.likely);
+ @memset(w.buffer[w.end..][0..n], byte);
+ w.end += n;
+ return n;
+ }
return writeSplat(w, &.{&.{byte}}, n);
}
@@ -801,9 +847,10 @@ pub fn splatBytesAll(w: *Writer, bytes: []const u8, splat: usize) Error!void {
var remaining_bytes: usize = bytes.len * splat;
remaining_bytes -= try w.splatBytes(bytes, splat);
while (remaining_bytes > 0) {
- const leftover = remaining_bytes % bytes.len;
- const buffers: [2][]const u8 = .{ bytes[bytes.len - leftover ..], bytes };
- remaining_bytes -= try w.writeSplat(&buffers, splat);
+ const leftover_splat = remaining_bytes / bytes.len;
+ const leftover_bytes = remaining_bytes % bytes.len;
+ const buffers: [2][]const u8 = .{ bytes[bytes.len - leftover_bytes ..], bytes };
+ remaining_bytes -= try w.writeSplat(&buffers, leftover_splat);
}
}
@@ -1564,17 +1611,23 @@ pub fn printFloatHexOptions(w: *Writer, value: anytype, options: std.fmt.Number)
}
pub fn printFloatHex(w: *Writer, value: anytype, case: std.fmt.Case, opt_precision: ?usize) Error!void {
- if (std.math.signbit(value)) try w.writeByte('-');
- if (std.math.isNan(value)) return w.writeAll(switch (case) {
+ const v = switch (@TypeOf(value)) {
+ // comptime_float internally is a f128; this preserves precision.
+ comptime_float => @as(f128, value),
+ else => value,
+ };
+
+ if (std.math.signbit(v)) try w.writeByte('-');
+ if (std.math.isNan(v)) return w.writeAll(switch (case) {
.lower => "nan",
.upper => "NAN",
});
- if (std.math.isInf(value)) return w.writeAll(switch (case) {
+ if (std.math.isInf(v)) return w.writeAll(switch (case) {
.lower => "inf",
.upper => "INF",
});
- const T = @TypeOf(value);
+ const T = @TypeOf(v);
const TU = std.meta.Int(.unsigned, @bitSizeOf(T));
const mantissa_bits = std.math.floatMantissaBits(T);
@@ -1584,7 +1637,7 @@ pub fn printFloatHex(w: *Writer, value: anytype, case: std.fmt.Case, opt_precisi
const exponent_mask = (1 << exponent_bits) - 1;
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
- const as_bits: TU = @bitCast(value);
+ const as_bits: TU = @bitCast(v);
var mantissa = as_bits & mantissa_mask;
var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask));
@@ -2239,6 +2292,10 @@ pub const Discarding = struct {
pub fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
if (File.Handle == void) return error.Unimplemented;
+ switch (builtin.zig_backend) {
+ else => {},
+ .stage2_aarch64 => return error.Unimplemented,
+ }
const d: *Discarding = @alignCast(@fieldParentPtr("writer", w));
d.count += w.end;
w.end = 0;
diff --git a/lib/std/Io/buffered_atomic_file.zig b/lib/std/Io/buffered_atomic_file.zig
deleted file mode 100644
index 48510bde52..0000000000
--- a/lib/std/Io/buffered_atomic_file.zig
+++ /dev/null
@@ -1,55 +0,0 @@
-const std = @import("../std.zig");
-const mem = std.mem;
-const fs = std.fs;
-const File = std.fs.File;
-
-pub const BufferedAtomicFile = struct {
- atomic_file: fs.AtomicFile,
- file_writer: File.Writer,
- buffered_writer: BufferedWriter,
- allocator: mem.Allocator,
-
- pub const buffer_size = 4096;
- pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer);
- pub const Writer = std.io.GenericWriter(*BufferedWriter, BufferedWriter.Error, BufferedWriter.write);
-
- /// TODO when https://github.com/ziglang/zig/issues/2761 is solved
- /// this API will not need an allocator
- pub fn create(
- allocator: mem.Allocator,
- dir: fs.Dir,
- dest_path: []const u8,
- atomic_file_options: fs.Dir.AtomicFileOptions,
- ) !*BufferedAtomicFile {
- var self = try allocator.create(BufferedAtomicFile);
- self.* = BufferedAtomicFile{
- .atomic_file = undefined,
- .file_writer = undefined,
- .buffered_writer = undefined,
- .allocator = allocator,
- };
- errdefer allocator.destroy(self);
-
- self.atomic_file = try dir.atomicFile(dest_path, atomic_file_options);
- errdefer self.atomic_file.deinit();
-
- self.file_writer = self.atomic_file.file.deprecatedWriter();
- self.buffered_writer = .{ .unbuffered_writer = self.file_writer };
- return self;
- }
-
- /// always call destroy, even after successful finish()
- pub fn destroy(self: *BufferedAtomicFile) void {
- self.atomic_file.deinit();
- self.allocator.destroy(self);
- }
-
- pub fn finish(self: *BufferedAtomicFile) !void {
- try self.buffered_writer.flush();
- try self.atomic_file.finish();
- }
-
- pub fn writer(self: *BufferedAtomicFile) Writer {
- return .{ .context = &self.buffered_writer };
- }
-};
diff --git a/lib/std/Io/c_writer.zig b/lib/std/Io/c_writer.zig
deleted file mode 100644
index 30d0cabcf5..0000000000
--- a/lib/std/Io/c_writer.zig
+++ /dev/null
@@ -1,44 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const io = std.io;
-const testing = std.testing;
-
-pub const CWriter = io.GenericWriter(*std.c.FILE, std.fs.File.WriteError, cWriterWrite);
-
-pub fn cWriter(c_file: *std.c.FILE) CWriter {
- return .{ .context = c_file };
-}
-
-fn cWriterWrite(c_file: *std.c.FILE, bytes: []const u8) std.fs.File.WriteError!usize {
- const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, c_file);
- if (amt_written >= 0) return amt_written;
- switch (@as(std.c.E, @enumFromInt(std.c._errno().*))) {
- .SUCCESS => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable, // this is a blocking API
- .BADF => unreachable, // always a race condition
- .DESTADDRREQ => unreachable, // connect was never called
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- else => |err| return std.posix.unexpectedErrno(err),
- }
-}
-
-test cWriter {
- if (!builtin.link_libc or builtin.os.tag == .wasi) return error.SkipZigTest;
-
- const filename = "tmp_io_test_file.txt";
- const out_file = std.c.fopen(filename, "w") orelse return error.UnableToOpenTestFile;
- defer {
- _ = std.c.fclose(out_file);
- std.fs.cwd().deleteFileZ(filename) catch {};
- }
-
- const writer = cWriter(out_file);
- try writer.print("hi: {}\n", .{@as(i32, 123)});
-}
diff --git a/lib/std/Io/change_detection_stream.zig b/lib/std/Io/change_detection_stream.zig
deleted file mode 100644
index d9da1c4a0e..0000000000
--- a/lib/std/Io/change_detection_stream.zig
+++ /dev/null
@@ -1,55 +0,0 @@
-const std = @import("../std.zig");
-const io = std.io;
-const mem = std.mem;
-const assert = std.debug.assert;
-
-/// Used to detect if the data written to a stream differs from a source buffer
-pub fn ChangeDetectionStream(comptime WriterType: type) type {
- return struct {
- const Self = @This();
- pub const Error = WriterType.Error;
- pub const Writer = io.GenericWriter(*Self, Error, write);
-
- anything_changed: bool,
- underlying_writer: WriterType,
- source_index: usize,
- source: []const u8,
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- if (!self.anything_changed) {
- const end = self.source_index + bytes.len;
- if (end > self.source.len) {
- self.anything_changed = true;
- } else {
- const src_slice = self.source[self.source_index..end];
- self.source_index += bytes.len;
- if (!mem.eql(u8, bytes, src_slice)) {
- self.anything_changed = true;
- }
- }
- }
-
- return self.underlying_writer.write(bytes);
- }
-
- pub fn changeDetected(self: *Self) bool {
- return self.anything_changed or (self.source_index != self.source.len);
- }
- };
-}
-
-pub fn changeDetectionStream(
- source: []const u8,
- underlying_writer: anytype,
-) ChangeDetectionStream(@TypeOf(underlying_writer)) {
- return ChangeDetectionStream(@TypeOf(underlying_writer)){
- .anything_changed = false,
- .underlying_writer = underlying_writer,
- .source_index = 0,
- .source = source,
- };
-}
diff --git a/lib/std/Io/find_byte_writer.zig b/lib/std/Io/find_byte_writer.zig
deleted file mode 100644
index fe6836f603..0000000000
--- a/lib/std/Io/find_byte_writer.zig
+++ /dev/null
@@ -1,40 +0,0 @@
-const std = @import("../std.zig");
-const io = std.io;
-const assert = std.debug.assert;
-
-/// A Writer that returns whether the given character has been written to it.
-/// The contents are not written to anything.
-pub fn FindByteWriter(comptime UnderlyingWriter: type) type {
- return struct {
- const Self = @This();
- pub const Error = UnderlyingWriter.Error;
- pub const Writer = io.GenericWriter(*Self, Error, write);
-
- underlying_writer: UnderlyingWriter,
- byte_found: bool,
- byte: u8,
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- if (!self.byte_found) {
- self.byte_found = blk: {
- for (bytes) |b|
- if (b == self.byte) break :blk true;
- break :blk false;
- };
- }
- return self.underlying_writer.write(bytes);
- }
- };
-}
-
-pub fn findByteWriter(byte: u8, underlying_writer: anytype) FindByteWriter(@TypeOf(underlying_writer)) {
- return FindByteWriter(@TypeOf(underlying_writer)){
- .underlying_writer = underlying_writer,
- .byte = byte,
- .byte_found = false,
- };
-}
diff --git a/lib/std/Io/multi_writer.zig b/lib/std/Io/multi_writer.zig
deleted file mode 100644
index 20e9e782de..0000000000
--- a/lib/std/Io/multi_writer.zig
+++ /dev/null
@@ -1,53 +0,0 @@
-const std = @import("../std.zig");
-const io = std.io;
-
-/// Takes a tuple of streams, and constructs a new stream that writes to all of them
-pub fn MultiWriter(comptime Writers: type) type {
- comptime var ErrSet = error{};
- inline for (@typeInfo(Writers).@"struct".fields) |field| {
- const StreamType = field.type;
- ErrSet = ErrSet || StreamType.Error;
- }
-
- return struct {
- const Self = @This();
-
- streams: Writers,
-
- pub const Error = ErrSet;
- pub const Writer = io.GenericWriter(*Self, Error, write);
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
-
- pub fn write(self: *Self, bytes: []const u8) Error!usize {
- inline for (self.streams) |stream|
- try stream.writeAll(bytes);
- return bytes.len;
- }
- };
-}
-
-pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) {
- return .{ .streams = streams };
-}
-
-const testing = std.testing;
-
-test "MultiWriter" {
- var tmp = testing.tmpDir(.{});
- defer tmp.cleanup();
- var f = try tmp.dir.createFile("t.txt", .{});
-
- var buf1: [255]u8 = undefined;
- var fbs1 = io.fixedBufferStream(&buf1);
- var buf2: [255]u8 = undefined;
- var stream = multiWriter(.{ fbs1.writer(), f.writer() });
-
- try stream.writer().print("HI", .{});
- f.close();
-
- try testing.expectEqualSlices(u8, "HI", fbs1.getWritten());
- try testing.expectEqualSlices(u8, "HI", try tmp.dir.readFile("t.txt", &buf2));
-}
diff --git a/lib/std/Io/stream_source.zig b/lib/std/Io/stream_source.zig
deleted file mode 100644
index 2a3527e479..0000000000
--- a/lib/std/Io/stream_source.zig
+++ /dev/null
@@ -1,127 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const io = std.io;
-
-/// Provides `io.GenericReader`, `io.GenericWriter`, and `io.SeekableStream` for in-memory buffers as
-/// well as files.
-/// For memory sources, if the supplied byte buffer is const, then `io.GenericWriter` is not available.
-/// The error set of the stream functions is the error set of the corresponding file functions.
-pub const StreamSource = union(enum) {
- // TODO: expose UEFI files to std.os in a way that allows this to be true
- const has_file = (builtin.os.tag != .freestanding and builtin.os.tag != .uefi);
-
- /// The stream access is redirected to this buffer.
- buffer: io.FixedBufferStream([]u8),
-
- /// The stream access is redirected to this buffer.
- /// Writing to the source will always yield `error.AccessDenied`.
- const_buffer: io.FixedBufferStream([]const u8),
-
- /// The stream access is redirected to this file.
- /// On freestanding, this must never be initialized!
- file: if (has_file) std.fs.File else void,
-
- pub const ReadError = io.FixedBufferStream([]u8).ReadError || (if (has_file) std.fs.File.ReadError else error{});
- pub const WriteError = error{AccessDenied} || io.FixedBufferStream([]u8).WriteError || (if (has_file) std.fs.File.WriteError else error{});
- pub const SeekError = io.FixedBufferStream([]u8).SeekError || (if (has_file) std.fs.File.SeekError else error{});
- pub const GetSeekPosError = io.FixedBufferStream([]u8).GetSeekPosError || (if (has_file) std.fs.File.GetSeekPosError else error{});
-
- pub const Reader = io.GenericReader(*StreamSource, ReadError, read);
- pub const Writer = io.GenericWriter(*StreamSource, WriteError, write);
- pub const SeekableStream = io.SeekableStream(
- *StreamSource,
- SeekError,
- GetSeekPosError,
- seekTo,
- seekBy,
- getPos,
- getEndPos,
- );
-
- pub fn read(self: *StreamSource, dest: []u8) ReadError!usize {
- switch (self.*) {
- .buffer => |*x| return x.read(dest),
- .const_buffer => |*x| return x.read(dest),
- .file => |x| if (!has_file) unreachable else return x.read(dest),
- }
- }
-
- pub fn write(self: *StreamSource, bytes: []const u8) WriteError!usize {
- switch (self.*) {
- .buffer => |*x| return x.write(bytes),
- .const_buffer => return error.AccessDenied,
- .file => |x| if (!has_file) unreachable else return x.write(bytes),
- }
- }
-
- pub fn seekTo(self: *StreamSource, pos: u64) SeekError!void {
- switch (self.*) {
- .buffer => |*x| return x.seekTo(pos),
- .const_buffer => |*x| return x.seekTo(pos),
- .file => |x| if (!has_file) unreachable else return x.seekTo(pos),
- }
- }
-
- pub fn seekBy(self: *StreamSource, amt: i64) SeekError!void {
- switch (self.*) {
- .buffer => |*x| return x.seekBy(amt),
- .const_buffer => |*x| return x.seekBy(amt),
- .file => |x| if (!has_file) unreachable else return x.seekBy(amt),
- }
- }
-
- pub fn getEndPos(self: *StreamSource) GetSeekPosError!u64 {
- switch (self.*) {
- .buffer => |*x| return x.getEndPos(),
- .const_buffer => |*x| return x.getEndPos(),
- .file => |x| if (!has_file) unreachable else return x.getEndPos(),
- }
- }
-
- pub fn getPos(self: *StreamSource) GetSeekPosError!u64 {
- switch (self.*) {
- .buffer => |*x| return x.getPos(),
- .const_buffer => |*x| return x.getPos(),
- .file => |x| if (!has_file) unreachable else return x.getPos(),
- }
- }
-
- pub fn reader(self: *StreamSource) Reader {
- return .{ .context = self };
- }
-
- pub fn writer(self: *StreamSource) Writer {
- return .{ .context = self };
- }
-
- pub fn seekableStream(self: *StreamSource) SeekableStream {
- return .{ .context = self };
- }
-};
-
-test "refs" {
- std.testing.refAllDecls(StreamSource);
-}
-
-test "mutable buffer" {
- var buffer: [64]u8 = undefined;
- var source = StreamSource{ .buffer = std.io.fixedBufferStream(&buffer) };
-
- var writer = source.writer();
-
- try writer.writeAll("Hello, World!");
-
- try std.testing.expectEqualStrings("Hello, World!", source.buffer.getWritten());
-}
-
-test "const buffer" {
- const buffer: [64]u8 = "Hello, World!".* ++ ([1]u8{0xAA} ** 51);
- var source = StreamSource{ .const_buffer = std.io.fixedBufferStream(&buffer) };
-
- var reader = source.reader();
-
- var dst_buffer: [13]u8 = undefined;
- try reader.readNoEof(&dst_buffer);
-
- try std.testing.expectEqualStrings("Hello, World!", &dst_buffer);
-}
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index 2634553d25..2806c1a09c 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -25,6 +25,7 @@ redraw_event: std.Thread.ResetEvent,
/// Accessed atomically.
done: bool,
need_clear: bool,
+status: Status,
refresh_rate_ns: u64,
initial_delay_ns: u64,
@@ -47,6 +48,22 @@ node_freelist: Freelist,
/// value may at times temporarily exceed the node count.
node_end_index: u32,
+pub const Status = enum {
+ /// Indicates the application is progressing towards completion of a task.
+ /// Unless the application is interactive, this is the only status the
+ /// program will ever have!
+ working,
+ /// The application has completed an operation, and is now waiting for user
+ /// input rather than calling exit(0).
+ success,
+ /// The application encountered an error, and is now waiting for user input
+ /// rather than calling exit(1).
+ failure,
+ /// The application encountered at least one error, but is still working on
+ /// more tasks.
+ failure_working,
+};
+
const Freelist = packed struct(u32) {
head: Node.OptionalIndex,
/// Whenever `node_freelist` is added to, this generation is incremented
@@ -383,6 +400,7 @@ var global_progress: Progress = .{
.draw_buffer = undefined,
.done = false,
.need_clear = false,
+ .status = .working,
.node_parents = &node_parents_buffer,
.node_storage = &node_storage_buffer,
@@ -408,6 +426,9 @@ pub const have_ipc = switch (builtin.os.tag) {
const noop_impl = builtin.single_threaded or switch (builtin.os.tag) {
.wasi, .freestanding => true,
else => false,
+} or switch (builtin.zig_backend) {
+ .stage2_aarch64 => true,
+ else => false,
};
/// Initializes a global Progress instance.
@@ -495,6 +516,11 @@ pub fn start(options: Options) Node {
return root_node;
}
+pub fn setStatus(new_status: Status) void {
+ if (noop_impl) return;
+ @atomicStore(Status, &global_progress.status, new_status, .monotonic);
+}
+
/// Returns whether a resize is needed to learn the terminal size.
fn wait(timeout_ns: u64) bool {
const resize_flag = if (global_progress.redraw_event.timedWait(timeout_ns)) |_|
@@ -675,6 +701,14 @@ const save = "\x1b7";
const restore = "\x1b8";
const finish_sync = "\x1b[?2026l";
+const progress_remove = "\x1b]9;4;0\x07";
+const @"progress_normal {d}" = "\x1b]9;4;1;{d}\x07";
+const @"progress_error {d}" = "\x1b]9;4;2;{d}\x07";
+const progress_pulsing = "\x1b]9;4;3\x07";
+const progress_pulsing_error = "\x1b]9;4;2\x07";
+const progress_normal_100 = "\x1b]9;4;1;100\x07";
+const progress_error_100 = "\x1b]9;4;2;100\x07";
+
const TreeSymbol = enum {
/// ├─
tee,
@@ -754,10 +788,10 @@ fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize {
}
fn clearWrittenWithEscapeCodes() anyerror!void {
- if (!global_progress.need_clear) return;
+ if (noop_impl or !global_progress.need_clear) return;
global_progress.need_clear = false;
- try write(clear);
+ try write(clear ++ progress_remove);
}
/// U+25BA or â–º
@@ -1200,6 +1234,43 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) struct { []u8, usize } {
i, const nl_n = computeNode(buf, i, 0, serialized, children, root_node_index);
if (global_progress.terminal_mode == .ansi_escape_codes) {
+ {
+ // Set progress state https://conemu.github.io/en/AnsiEscapeCodes.html#ConEmu_specific_OSC
+ const root_storage = &serialized.storage[0];
+ const storage = if (root_storage.name[0] != 0 or children[0].child == .none) root_storage else &serialized.storage[@intFromEnum(children[0].child)];
+ const estimated_total = storage.estimated_total_count;
+ const completed_items = storage.completed_count;
+ const status = @atomicLoad(Status, &global_progress.status, .monotonic);
+ switch (status) {
+ .working => {
+ if (estimated_total == 0) {
+ buf[i..][0..progress_pulsing.len].* = progress_pulsing.*;
+ i += progress_pulsing.len;
+ } else {
+ const percent = completed_items * 100 / estimated_total;
+ i += (std.fmt.bufPrint(buf[i..], @"progress_normal {d}", .{percent}) catch &.{}).len;
+ }
+ },
+ .success => {
+ buf[i..][0..progress_remove.len].* = progress_remove.*;
+ i += progress_remove.len;
+ },
+ .failure => {
+ buf[i..][0..progress_error_100.len].* = progress_error_100.*;
+ i += progress_error_100.len;
+ },
+ .failure_working => {
+ if (estimated_total == 0) {
+ buf[i..][0..progress_pulsing_error.len].* = progress_pulsing_error.*;
+ i += progress_pulsing_error.len;
+ } else {
+ const percent = completed_items * 100 / estimated_total;
+ i += (std.fmt.bufPrint(buf[i..], @"progress_error {d}", .{percent}) catch &.{}).len;
+ }
+ },
+ }
+ }
+
if (nl_n > 0) {
buf[i] = '\r';
i += 1;
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 8f4aefc713..54376426e2 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -772,7 +772,7 @@ pub const Endian = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
-pub const Signedness = enum {
+pub const Signedness = enum(u1) {
signed,
unsigned,
};
@@ -894,7 +894,10 @@ pub const VaList = switch (builtin.cpu.arch) {
.aarch64, .aarch64_be => switch (builtin.os.tag) {
.windows => *u8,
.ios, .macos, .tvos, .watchos, .visionos => *u8,
- else => @compileError("disabled due to miscompilations"), // VaListAarch64,
+ else => switch (builtin.zig_backend) {
+ .stage2_aarch64 => VaListAarch64,
+ else => @compileError("disabled due to miscompilations"),
+ },
},
.arm, .armeb, .thumb, .thumbeb => switch (builtin.os.tag) {
.ios, .macos, .tvos, .watchos, .visionos => *u8,
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 2880e3850a..e2f55dd6fb 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -7147,7 +7147,7 @@ pub const dirent = switch (native_os) {
off: off_t,
reclen: c_ushort,
type: u8,
- name: [256:0]u8,
+ name: [255:0]u8,
},
else => void,
};
@@ -10497,9 +10497,9 @@ pub const sysconf = switch (native_os) {
pub const sf_hdtr = switch (native_os) {
.freebsd, .macos, .ios, .tvos, .watchos, .visionos => extern struct {
- headers: [*]const iovec_const,
+ headers: ?[*]const iovec_const,
hdr_cnt: c_int,
- trailers: [*]const iovec_const,
+ trailers: ?[*]const iovec_const,
trl_cnt: c_int,
},
else => void,
diff --git a/lib/std/compress.zig b/lib/std/compress.zig
index e07c3a4126..018de51001 100644
--- a/lib/std/compress.zig
+++ b/lib/std/compress.zig
@@ -1,75 +1,19 @@
//! Compression algorithms.
-const std = @import("std.zig");
-
pub const flate = @import("compress/flate.zig");
pub const gzip = @import("compress/gzip.zig");
pub const zlib = @import("compress/zlib.zig");
pub const lzma = @import("compress/lzma.zig");
pub const lzma2 = @import("compress/lzma2.zig");
pub const xz = @import("compress/xz.zig");
-pub const zstd = @import("compress/zstandard.zig");
-
-pub fn HashedReader(ReaderType: type, HasherType: type) type {
- return struct {
- child_reader: ReaderType,
- hasher: HasherType,
-
- pub const Error = ReaderType.Error;
- pub const Reader = std.io.GenericReader(*@This(), Error, read);
-
- pub fn read(self: *@This(), buf: []u8) Error!usize {
- const amt = try self.child_reader.read(buf);
- self.hasher.update(buf[0..amt]);
- return amt;
- }
-
- pub fn reader(self: *@This()) Reader {
- return .{ .context = self };
- }
- };
-}
-
-pub fn hashedReader(
- reader: anytype,
- hasher: anytype,
-) HashedReader(@TypeOf(reader), @TypeOf(hasher)) {
- return .{ .child_reader = reader, .hasher = hasher };
-}
-
-pub fn HashedWriter(WriterType: type, HasherType: type) type {
- return struct {
- child_writer: WriterType,
- hasher: HasherType,
-
- pub const Error = WriterType.Error;
- pub const Writer = std.io.GenericWriter(*@This(), Error, write);
-
- pub fn write(self: *@This(), buf: []const u8) Error!usize {
- const amt = try self.child_writer.write(buf);
- self.hasher.update(buf[0..amt]);
- return amt;
- }
-
- pub fn writer(self: *@This()) Writer {
- return .{ .context = self };
- }
- };
-}
-
-pub fn hashedWriter(
- writer: anytype,
- hasher: anytype,
-) HashedWriter(@TypeOf(writer), @TypeOf(hasher)) {
- return .{ .child_writer = writer, .hasher = hasher };
-}
+pub const zstd = @import("compress/zstd.zig");
test {
+ _ = flate;
_ = lzma;
_ = lzma2;
_ = xz;
_ = zstd;
- _ = flate;
_ = gzip;
_ = zlib;
}
diff --git a/lib/std/compress/xz.zig b/lib/std/compress/xz.zig
index 445d103098..6c99e9f427 100644
--- a/lib/std/compress/xz.zig
+++ b/lib/std/compress/xz.zig
@@ -12,17 +12,11 @@ pub const Check = enum(u4) {
};
fn readStreamFlags(reader: anytype, check: *Check) !void {
- var bit_reader = std.io.bitReader(.little, reader);
-
- const reserved1 = try bit_reader.readBitsNoEof(u8, 8);
- if (reserved1 != 0)
- return error.CorruptInput;
-
- check.* = @as(Check, @enumFromInt(try bit_reader.readBitsNoEof(u4, 4)));
-
- const reserved2 = try bit_reader.readBitsNoEof(u4, 4);
- if (reserved2 != 0)
- return error.CorruptInput;
+ const reserved1 = try reader.readByte();
+ if (reserved1 != 0) return error.CorruptInput;
+ const byte = try reader.readByte();
+ if ((byte >> 4) != 0) return error.CorruptInput;
+ check.* = @enumFromInt(@as(u4, @truncate(byte)));
}
pub fn decompress(allocator: Allocator, reader: anytype) !Decompress(@TypeOf(reader)) {
@@ -47,7 +41,7 @@ pub fn Decompress(comptime ReaderType: type) type {
var check: Check = undefined;
const hash_a = blk: {
- var hasher = std.compress.hashedReader(source, Crc32.init());
+ var hasher = hashedReader(source, Crc32.init());
try readStreamFlags(hasher.reader(), &check);
break :blk hasher.hasher.final();
};
@@ -80,7 +74,7 @@ pub fn Decompress(comptime ReaderType: type) type {
return r;
const index_size = blk: {
- var hasher = std.compress.hashedReader(self.in_reader, Crc32.init());
+ var hasher = hashedReader(self.in_reader, Crc32.init());
hasher.hasher.update(&[1]u8{0x00});
var counter = std.io.countingReader(hasher.reader());
@@ -115,7 +109,7 @@ pub fn Decompress(comptime ReaderType: type) type {
const hash_a = try self.in_reader.readInt(u32, .little);
const hash_b = blk: {
- var hasher = std.compress.hashedReader(self.in_reader, Crc32.init());
+ var hasher = hashedReader(self.in_reader, Crc32.init());
const hashed_reader = hasher.reader();
const backward_size = (@as(u64, try hashed_reader.readInt(u32, .little)) + 1) * 4;
@@ -140,6 +134,33 @@ pub fn Decompress(comptime ReaderType: type) type {
};
}
+pub fn HashedReader(ReaderType: type, HasherType: type) type {
+ return struct {
+ child_reader: ReaderType,
+ hasher: HasherType,
+
+ pub const Error = ReaderType.Error;
+ pub const Reader = std.io.GenericReader(*@This(), Error, read);
+
+ pub fn read(self: *@This(), buf: []u8) Error!usize {
+ const amt = try self.child_reader.read(buf);
+ self.hasher.update(buf[0..amt]);
+ return amt;
+ }
+
+ pub fn reader(self: *@This()) Reader {
+ return .{ .context = self };
+ }
+ };
+}
+
+pub fn hashedReader(
+ reader: anytype,
+ hasher: anytype,
+) HashedReader(@TypeOf(reader), @TypeOf(hasher)) {
+ return .{ .child_reader = reader, .hasher = hasher };
+}
+
test {
_ = @import("xz/test.zig");
}
diff --git a/lib/std/compress/xz/block.zig b/lib/std/compress/xz/block.zig
index 6253341f36..505dc543a8 100644
--- a/lib/std/compress/xz/block.zig
+++ b/lib/std/compress/xz/block.zig
@@ -91,7 +91,7 @@ pub fn Decoder(comptime ReaderType: type) type {
// Block Header
{
- var header_hasher = std.compress.hashedReader(block_reader, Crc32.init());
+ var header_hasher = xz.hashedReader(block_reader, Crc32.init());
const header_reader = header_hasher.reader();
const header_size = @as(u64, try header_reader.readByte()) * 4;
diff --git a/lib/std/compress/zstandard.zig b/lib/std/compress/zstandard.zig
deleted file mode 100644
index df45e9686d..0000000000
--- a/lib/std/compress/zstandard.zig
+++ /dev/null
@@ -1,310 +0,0 @@
-const std = @import("std");
-const RingBuffer = std.RingBuffer;
-
-const types = @import("zstandard/types.zig");
-pub const frame = types.frame;
-pub const compressed_block = types.compressed_block;
-
-pub const decompress = @import("zstandard/decompress.zig");
-
-pub const DecompressorOptions = struct {
- verify_checksum: bool = true,
- window_buffer: []u8,
-
- /// Recommended amount by the standard. Lower than this may result
- /// in inability to decompress common streams.
- pub const default_window_buffer_len = 8 * 1024 * 1024;
-};
-
-pub fn Decompressor(comptime ReaderType: type) type {
- return struct {
- const Self = @This();
-
- const table_size_max = types.compressed_block.table_size_max;
-
- source: std.io.CountingReader(ReaderType),
- state: enum { NewFrame, InFrame, LastBlock },
- decode_state: decompress.block.DecodeState,
- frame_context: decompress.FrameContext,
- buffer: WindowBuffer,
- literal_fse_buffer: [table_size_max.literal]types.compressed_block.Table.Fse,
- match_fse_buffer: [table_size_max.match]types.compressed_block.Table.Fse,
- offset_fse_buffer: [table_size_max.offset]types.compressed_block.Table.Fse,
- literals_buffer: [types.block_size_max]u8,
- sequence_buffer: [types.block_size_max]u8,
- verify_checksum: bool,
- checksum: ?u32,
- current_frame_decompressed_size: usize,
-
- const WindowBuffer = struct {
- data: []u8 = undefined,
- read_index: usize = 0,
- write_index: usize = 0,
- };
-
- pub const Error = ReaderType.Error || error{
- ChecksumFailure,
- DictionaryIdFlagUnsupported,
- MalformedBlock,
- MalformedFrame,
- OutOfMemory,
- };
-
- pub const Reader = std.io.GenericReader(*Self, Error, read);
-
- pub fn init(source: ReaderType, options: DecompressorOptions) Self {
- return .{
- .source = std.io.countingReader(source),
- .state = .NewFrame,
- .decode_state = undefined,
- .frame_context = undefined,
- .buffer = .{ .data = options.window_buffer },
- .literal_fse_buffer = undefined,
- .match_fse_buffer = undefined,
- .offset_fse_buffer = undefined,
- .literals_buffer = undefined,
- .sequence_buffer = undefined,
- .verify_checksum = options.verify_checksum,
- .checksum = undefined,
- .current_frame_decompressed_size = undefined,
- };
- }
-
- fn frameInit(self: *Self) !void {
- const source_reader = self.source.reader();
- switch (try decompress.decodeFrameHeader(source_reader)) {
- .skippable => |header| {
- try source_reader.skipBytes(header.frame_size, .{});
- self.state = .NewFrame;
- },
- .zstandard => |header| {
- const frame_context = try decompress.FrameContext.init(
- header,
- self.buffer.data.len,
- self.verify_checksum,
- );
-
- const decode_state = decompress.block.DecodeState.init(
- &self.literal_fse_buffer,
- &self.match_fse_buffer,
- &self.offset_fse_buffer,
- );
-
- self.decode_state = decode_state;
- self.frame_context = frame_context;
-
- self.checksum = null;
- self.current_frame_decompressed_size = 0;
-
- self.state = .InFrame;
- },
- }
- }
-
- pub fn reader(self: *Self) Reader {
- return .{ .context = self };
- }
-
- pub fn read(self: *Self, buffer: []u8) Error!usize {
- if (buffer.len == 0) return 0;
-
- var size: usize = 0;
- while (size == 0) {
- while (self.state == .NewFrame) {
- const initial_count = self.source.bytes_read;
- self.frameInit() catch |err| switch (err) {
- error.DictionaryIdFlagUnsupported => return error.DictionaryIdFlagUnsupported,
- error.EndOfStream => return if (self.source.bytes_read == initial_count)
- 0
- else
- error.MalformedFrame,
- else => return error.MalformedFrame,
- };
- }
- size = try self.readInner(buffer);
- }
- return size;
- }
-
- fn readInner(self: *Self, buffer: []u8) Error!usize {
- std.debug.assert(self.state != .NewFrame);
-
- var ring_buffer = RingBuffer{
- .data = self.buffer.data,
- .read_index = self.buffer.read_index,
- .write_index = self.buffer.write_index,
- };
- defer {
- self.buffer.read_index = ring_buffer.read_index;
- self.buffer.write_index = ring_buffer.write_index;
- }
-
- const source_reader = self.source.reader();
- while (ring_buffer.isEmpty() and self.state != .LastBlock) {
- const header_bytes = source_reader.readBytesNoEof(3) catch
- return error.MalformedFrame;
- const block_header = decompress.block.decodeBlockHeader(&header_bytes);
-
- decompress.block.decodeBlockReader(
- &ring_buffer,
- source_reader,
- block_header,
- &self.decode_state,
- self.frame_context.block_size_max,
- &self.literals_buffer,
- &self.sequence_buffer,
- ) catch
- return error.MalformedBlock;
-
- if (self.frame_context.content_size) |size| {
- if (self.current_frame_decompressed_size > size) return error.MalformedFrame;
- }
-
- const size = ring_buffer.len();
- self.current_frame_decompressed_size += size;
-
- if (self.frame_context.hasher_opt) |*hasher| {
- if (size > 0) {
- const written_slice = ring_buffer.sliceLast(size);
- hasher.update(written_slice.first);
- hasher.update(written_slice.second);
- }
- }
- if (block_header.last_block) {
- self.state = .LastBlock;
- if (self.frame_context.has_checksum) {
- const checksum = source_reader.readInt(u32, .little) catch
- return error.MalformedFrame;
- if (self.verify_checksum) {
- if (self.frame_context.hasher_opt) |*hasher| {
- if (checksum != decompress.computeChecksum(hasher))
- return error.ChecksumFailure;
- }
- }
- }
- if (self.frame_context.content_size) |content_size| {
- if (content_size != self.current_frame_decompressed_size) {
- return error.MalformedFrame;
- }
- }
- }
- }
-
- const size = @min(ring_buffer.len(), buffer.len);
- if (size > 0) {
- ring_buffer.readFirstAssumeLength(buffer, size);
- }
- if (self.state == .LastBlock and ring_buffer.len() == 0) {
- self.state = .NewFrame;
- }
- return size;
- }
- };
-}
-
-pub fn decompressor(reader: anytype, options: DecompressorOptions) Decompressor(@TypeOf(reader)) {
- return Decompressor(@TypeOf(reader)).init(reader, options);
-}
-
-fn testDecompress(data: []const u8) ![]u8 {
- const window_buffer = try std.testing.allocator.alloc(u8, 1 << 23);
- defer std.testing.allocator.free(window_buffer);
-
- var in_stream = std.io.fixedBufferStream(data);
- var zstd_stream = decompressor(in_stream.reader(), .{ .window_buffer = window_buffer });
- const result = zstd_stream.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
- return result;
-}
-
-fn testReader(data: []const u8, comptime expected: []const u8) !void {
- const buf = try testDecompress(data);
- defer std.testing.allocator.free(buf);
- try std.testing.expectEqualSlices(u8, expected, buf);
-}
-
-test "decompression" {
- const uncompressed = @embedFile("testdata/rfc8478.txt");
- const compressed3 = @embedFile("testdata/rfc8478.txt.zst.3");
- const compressed19 = @embedFile("testdata/rfc8478.txt.zst.19");
-
- const buffer = try std.testing.allocator.alloc(u8, uncompressed.len);
- defer std.testing.allocator.free(buffer);
-
- const res3 = try decompress.decode(buffer, compressed3, true);
- try std.testing.expectEqual(uncompressed.len, res3);
- try std.testing.expectEqualSlices(u8, uncompressed, buffer);
-
- @memset(buffer, undefined);
- const res19 = try decompress.decode(buffer, compressed19, true);
- try std.testing.expectEqual(uncompressed.len, res19);
- try std.testing.expectEqualSlices(u8, uncompressed, buffer);
-
- try testReader(compressed3, uncompressed);
- try testReader(compressed19, uncompressed);
-}
-
-fn expectEqualDecoded(expected: []const u8, input: []const u8) !void {
- {
- const result = try decompress.decodeAlloc(std.testing.allocator, input, false, 1 << 23);
- defer std.testing.allocator.free(result);
- try std.testing.expectEqualStrings(expected, result);
- }
-
- {
- var buffer = try std.testing.allocator.alloc(u8, 2 * expected.len);
- defer std.testing.allocator.free(buffer);
-
- const size = try decompress.decode(buffer, input, false);
- try std.testing.expectEqualStrings(expected, buffer[0..size]);
- }
-}
-
-fn expectEqualDecodedStreaming(expected: []const u8, input: []const u8) !void {
- const window_buffer = try std.testing.allocator.alloc(u8, 1 << 23);
- defer std.testing.allocator.free(window_buffer);
-
- var in_stream = std.io.fixedBufferStream(input);
- var stream = decompressor(in_stream.reader(), .{ .window_buffer = window_buffer });
-
- const result = try stream.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
- defer std.testing.allocator.free(result);
-
- try std.testing.expectEqualStrings(expected, result);
-}
-
-test "zero sized block" {
- const input_raw =
- "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
- "\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
- "\x01\x00\x00"; // block header with: last_block set, block_type raw, block_size zero
-
- const input_rle =
- "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
- "\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
- "\x03\x00\x00" ++ // block header with: last_block set, block_type rle, block_size zero
- "\xaa"; // block_content
-
- try expectEqualDecoded("", input_raw);
- try expectEqualDecoded("", input_rle);
- try expectEqualDecodedStreaming("", input_raw);
- try expectEqualDecodedStreaming("", input_rle);
-}
-
-test "declared raw literals size too large" {
- const input_raw =
- "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
- "\x00\x00" ++ // frame header: everything unset, window descriptor zero
- "\x95\x00\x00" ++ // block header with: last_block set, block_type compressed, block_size 18
- "\xbc\xf3\xae" ++ // literals section header with: type raw, size_format 3, regenerated_size 716603
- "\xa5\x9f\xe3"; // some bytes of literal content - the content is shorter than regenerated_size
-
- // Note that the regenerated_size in the above input is larger than block maximum size, so the
- // block can't be valid as it is a raw literals block.
-
- var fbs = std.io.fixedBufferStream(input_raw);
- var window: [1024]u8 = undefined;
- var stream = decompressor(fbs.reader(), .{ .window_buffer = &window });
-
- var buf: [1024]u8 = undefined;
- try std.testing.expectError(error.MalformedBlock, stream.read(&buf));
-}
diff --git a/lib/std/compress/zstandard/decode/block.zig b/lib/std/compress/zstandard/decode/block.zig
deleted file mode 100644
index 49c6e7dc36..0000000000
--- a/lib/std/compress/zstandard/decode/block.zig
+++ /dev/null
@@ -1,1149 +0,0 @@
-const std = @import("std");
-const assert = std.debug.assert;
-const RingBuffer = std.RingBuffer;
-
-const types = @import("../types.zig");
-const frame = types.frame;
-const Table = types.compressed_block.Table;
-const LiteralsSection = types.compressed_block.LiteralsSection;
-const SequencesSection = types.compressed_block.SequencesSection;
-
-const huffman = @import("huffman.zig");
-const readers = @import("../readers.zig");
-
-const decodeFseTable = @import("fse.zig").decodeFseTable;
-
-pub const Error = error{
- BlockSizeOverMaximum,
- MalformedBlockSize,
- ReservedBlock,
- MalformedRleBlock,
- MalformedCompressedBlock,
-};
-
-pub const DecodeState = struct {
- repeat_offsets: [3]u32,
-
- offset: StateData(8),
- match: StateData(9),
- literal: StateData(9),
-
- offset_fse_buffer: []Table.Fse,
- match_fse_buffer: []Table.Fse,
- literal_fse_buffer: []Table.Fse,
-
- fse_tables_undefined: bool,
-
- literal_stream_reader: readers.ReverseBitReader,
- literal_stream_index: usize,
- literal_streams: LiteralsSection.Streams,
- literal_header: LiteralsSection.Header,
- huffman_tree: ?LiteralsSection.HuffmanTree,
-
- literal_written_count: usize,
- written_count: usize = 0,
-
- fn StateData(comptime max_accuracy_log: comptime_int) type {
- return struct {
- state: State,
- table: Table,
- accuracy_log: u8,
-
- const State = std.meta.Int(.unsigned, max_accuracy_log);
- };
- }
-
- pub fn init(
- literal_fse_buffer: []Table.Fse,
- match_fse_buffer: []Table.Fse,
- offset_fse_buffer: []Table.Fse,
- ) DecodeState {
- return DecodeState{
- .repeat_offsets = .{
- types.compressed_block.start_repeated_offset_1,
- types.compressed_block.start_repeated_offset_2,
- types.compressed_block.start_repeated_offset_3,
- },
-
- .offset = undefined,
- .match = undefined,
- .literal = undefined,
-
- .literal_fse_buffer = literal_fse_buffer,
- .match_fse_buffer = match_fse_buffer,
- .offset_fse_buffer = offset_fse_buffer,
-
- .fse_tables_undefined = true,
-
- .literal_written_count = 0,
- .literal_header = undefined,
- .literal_streams = undefined,
- .literal_stream_reader = undefined,
- .literal_stream_index = undefined,
- .huffman_tree = null,
-
- .written_count = 0,
- };
- }
-
- /// Prepare the decoder to decode a compressed block. Loads the literals
- /// stream and Huffman tree from `literals` and reads the FSE tables from
- /// `source`.
- ///
- /// Errors returned:
- /// - `error.BitStreamHasNoStartBit` if the (reversed) literal bitstream's
- /// first byte does not have any bits set
- /// - `error.TreelessLiteralsFirst` `literals` is a treeless literals
- /// section and the decode state does not have a Huffman tree from a
- /// previous block
- /// - `error.RepeatModeFirst` on the first call if one of the sequence FSE
- /// tables is set to repeat mode
- /// - `error.MalformedAccuracyLog` if an FSE table has an invalid accuracy
- /// - `error.MalformedFseTable` if there are errors decoding an FSE table
- /// - `error.EndOfStream` if `source` ends before all FSE tables are read
- pub fn prepare(
- self: *DecodeState,
- source: anytype,
- literals: LiteralsSection,
- sequences_header: SequencesSection.Header,
- ) !void {
- self.literal_written_count = 0;
- self.literal_header = literals.header;
- self.literal_streams = literals.streams;
-
- if (literals.huffman_tree) |tree| {
- self.huffman_tree = tree;
- } else if (literals.header.block_type == .treeless and self.huffman_tree == null) {
- return error.TreelessLiteralsFirst;
- }
-
- switch (literals.header.block_type) {
- .raw, .rle => {},
- .compressed, .treeless => {
- self.literal_stream_index = 0;
- switch (literals.streams) {
- .one => |slice| try self.initLiteralStream(slice),
- .four => |streams| try self.initLiteralStream(streams[0]),
- }
- },
- }
-
- if (sequences_header.sequence_count > 0) {
- try self.updateFseTable(source, .literal, sequences_header.literal_lengths);
- try self.updateFseTable(source, .offset, sequences_header.offsets);
- try self.updateFseTable(source, .match, sequences_header.match_lengths);
- self.fse_tables_undefined = false;
- }
- }
-
- /// Read initial FSE states for sequence decoding.
- ///
- /// Errors returned:
- /// - `error.EndOfStream` if `bit_reader` does not contain enough bits.
- pub fn readInitialFseState(self: *DecodeState, bit_reader: *readers.ReverseBitReader) error{EndOfStream}!void {
- self.literal.state = try bit_reader.readBitsNoEof(u9, self.literal.accuracy_log);
- self.offset.state = try bit_reader.readBitsNoEof(u8, self.offset.accuracy_log);
- self.match.state = try bit_reader.readBitsNoEof(u9, self.match.accuracy_log);
- }
-
- fn updateRepeatOffset(self: *DecodeState, offset: u32) void {
- self.repeat_offsets[2] = self.repeat_offsets[1];
- self.repeat_offsets[1] = self.repeat_offsets[0];
- self.repeat_offsets[0] = offset;
- }
-
- fn useRepeatOffset(self: *DecodeState, index: usize) u32 {
- if (index == 1)
- std.mem.swap(u32, &self.repeat_offsets[0], &self.repeat_offsets[1])
- else if (index == 2) {
- std.mem.swap(u32, &self.repeat_offsets[0], &self.repeat_offsets[2]);
- std.mem.swap(u32, &self.repeat_offsets[1], &self.repeat_offsets[2]);
- }
- return self.repeat_offsets[0];
- }
-
- const DataType = enum { offset, match, literal };
-
- fn updateState(
- self: *DecodeState,
- comptime choice: DataType,
- bit_reader: *readers.ReverseBitReader,
- ) error{ MalformedFseBits, EndOfStream }!void {
- switch (@field(self, @tagName(choice)).table) {
- .rle => {},
- .fse => |table| {
- const data = table[@field(self, @tagName(choice)).state];
- const T = @TypeOf(@field(self, @tagName(choice))).State;
- const bits_summand = try bit_reader.readBitsNoEof(T, data.bits);
- const next_state = std.math.cast(
- @TypeOf(@field(self, @tagName(choice))).State,
- data.baseline + bits_summand,
- ) orelse return error.MalformedFseBits;
- @field(self, @tagName(choice)).state = next_state;
- },
- }
- }
-
- const FseTableError = error{
- MalformedFseTable,
- MalformedAccuracyLog,
- RepeatModeFirst,
- EndOfStream,
- };
-
- fn updateFseTable(
- self: *DecodeState,
- source: anytype,
- comptime choice: DataType,
- mode: SequencesSection.Header.Mode,
- ) !void {
- const field_name = @tagName(choice);
- switch (mode) {
- .predefined => {
- @field(self, field_name).accuracy_log =
- @field(types.compressed_block.default_accuracy_log, field_name);
-
- @field(self, field_name).table =
- @field(types.compressed_block, "predefined_" ++ field_name ++ "_fse_table");
- },
- .rle => {
- @field(self, field_name).accuracy_log = 0;
- @field(self, field_name).table = .{ .rle = try source.readByte() };
- },
- .fse => {
- var bit_reader = readers.bitReader(source);
-
- const table_size = try decodeFseTable(
- &bit_reader,
- @field(types.compressed_block.table_symbol_count_max, field_name),
- @field(types.compressed_block.table_accuracy_log_max, field_name),
- @field(self, field_name ++ "_fse_buffer"),
- );
- @field(self, field_name).table = .{
- .fse = @field(self, field_name ++ "_fse_buffer")[0..table_size],
- };
- @field(self, field_name).accuracy_log = std.math.log2_int_ceil(usize, table_size);
- },
- .repeat => if (self.fse_tables_undefined) return error.RepeatModeFirst,
- }
- }
-
- const Sequence = struct {
- literal_length: u32,
- match_length: u32,
- offset: u32,
- };
-
- fn nextSequence(
- self: *DecodeState,
- bit_reader: *readers.ReverseBitReader,
- ) error{ InvalidBitStream, EndOfStream }!Sequence {
- const raw_code = self.getCode(.offset);
- const offset_code = std.math.cast(u5, raw_code) orelse {
- return error.InvalidBitStream;
- };
- const offset_value = (@as(u32, 1) << offset_code) + try bit_reader.readBitsNoEof(u32, offset_code);
-
- const match_code = self.getCode(.match);
- if (match_code >= types.compressed_block.match_length_code_table.len)
- return error.InvalidBitStream;
- const match = types.compressed_block.match_length_code_table[match_code];
- const match_length = match[0] + try bit_reader.readBitsNoEof(u32, match[1]);
-
- const literal_code = self.getCode(.literal);
- if (literal_code >= types.compressed_block.literals_length_code_table.len)
- return error.InvalidBitStream;
- const literal = types.compressed_block.literals_length_code_table[literal_code];
- const literal_length = literal[0] + try bit_reader.readBitsNoEof(u32, literal[1]);
-
- const offset = if (offset_value > 3) offset: {
- const offset = offset_value - 3;
- self.updateRepeatOffset(offset);
- break :offset offset;
- } else offset: {
- if (literal_length == 0) {
- if (offset_value == 3) {
- const offset = self.repeat_offsets[0] - 1;
- self.updateRepeatOffset(offset);
- break :offset offset;
- }
- break :offset self.useRepeatOffset(offset_value);
- }
- break :offset self.useRepeatOffset(offset_value - 1);
- };
-
- if (offset == 0) return error.InvalidBitStream;
-
- return .{
- .literal_length = literal_length,
- .match_length = match_length,
- .offset = offset,
- };
- }
-
- fn executeSequenceSlice(
- self: *DecodeState,
- dest: []u8,
- write_pos: usize,
- sequence: Sequence,
- ) (error{MalformedSequence} || DecodeLiteralsError)!void {
- if (sequence.offset > write_pos + sequence.literal_length) return error.MalformedSequence;
-
- try self.decodeLiteralsSlice(dest[write_pos..], sequence.literal_length);
- const copy_start = write_pos + sequence.literal_length - sequence.offset;
- for (
- dest[write_pos + sequence.literal_length ..][0..sequence.match_length],
- dest[copy_start..][0..sequence.match_length],
- ) |*d, s| d.* = s;
- self.written_count += sequence.match_length;
- }
-
- fn executeSequenceRingBuffer(
- self: *DecodeState,
- dest: *RingBuffer,
- sequence: Sequence,
- ) (error{MalformedSequence} || DecodeLiteralsError)!void {
- if (sequence.offset > @min(dest.data.len, self.written_count + sequence.literal_length))
- return error.MalformedSequence;
-
- try self.decodeLiteralsRingBuffer(dest, sequence.literal_length);
- const copy_start = dest.write_index + dest.data.len - sequence.offset;
- const copy_slice = dest.sliceAt(copy_start, sequence.match_length);
- dest.writeSliceForwardsAssumeCapacity(copy_slice.first);
- dest.writeSliceForwardsAssumeCapacity(copy_slice.second);
- self.written_count += sequence.match_length;
- }
-
- const DecodeSequenceError = error{
- InvalidBitStream,
- EndOfStream,
- MalformedSequence,
- MalformedFseBits,
- } || DecodeLiteralsError;
-
- /// Decode one sequence from `bit_reader` into `dest`, written starting at
- /// `write_pos` and update FSE states if `last_sequence` is `false`.
- /// `prepare()` must be called for the block before attempting to decode
- /// sequences.
- ///
- /// Errors returned:
- /// - `error.MalformedSequence` if the decompressed sequence would be
- /// longer than `sequence_size_limit` or the sequence's offset is too
- /// large
- /// - `error.UnexpectedEndOfLiteralStream` if the decoder state's literal
- /// streams do not contain enough literals for the sequence (this may
- /// mean the literal stream or the sequence is malformed).
- /// - `error.InvalidBitStream` if the FSE sequence bitstream is malformed
- /// - `error.EndOfStream` if `bit_reader` does not contain enough bits
- /// - `error.DestTooSmall` if `dest` is not large enough to holde the
- /// decompressed sequence
- pub fn decodeSequenceSlice(
- self: *DecodeState,
- dest: []u8,
- write_pos: usize,
- bit_reader: *readers.ReverseBitReader,
- sequence_size_limit: usize,
- last_sequence: bool,
- ) (error{DestTooSmall} || DecodeSequenceError)!usize {
- const sequence = try self.nextSequence(bit_reader);
- const sequence_length = @as(usize, sequence.literal_length) + sequence.match_length;
- if (sequence_length > sequence_size_limit) return error.MalformedSequence;
- if (sequence_length > dest[write_pos..].len) return error.DestTooSmall;
-
- try self.executeSequenceSlice(dest, write_pos, sequence);
- if (!last_sequence) {
- try self.updateState(.literal, bit_reader);
- try self.updateState(.match, bit_reader);
- try self.updateState(.offset, bit_reader);
- }
- return sequence_length;
- }
-
- /// Decode one sequence from `bit_reader` into `dest`; see
- /// `decodeSequenceSlice`.
- pub fn decodeSequenceRingBuffer(
- self: *DecodeState,
- dest: *RingBuffer,
- bit_reader: anytype,
- sequence_size_limit: usize,
- last_sequence: bool,
- ) DecodeSequenceError!usize {
- const sequence = try self.nextSequence(bit_reader);
- const sequence_length = @as(usize, sequence.literal_length) + sequence.match_length;
- if (sequence_length > sequence_size_limit) return error.MalformedSequence;
-
- try self.executeSequenceRingBuffer(dest, sequence);
- if (!last_sequence) {
- try self.updateState(.literal, bit_reader);
- try self.updateState(.match, bit_reader);
- try self.updateState(.offset, bit_reader);
- }
- return sequence_length;
- }
-
- fn nextLiteralMultiStream(
- self: *DecodeState,
- ) error{BitStreamHasNoStartBit}!void {
- self.literal_stream_index += 1;
- try self.initLiteralStream(self.literal_streams.four[self.literal_stream_index]);
- }
-
- fn initLiteralStream(self: *DecodeState, bytes: []const u8) error{BitStreamHasNoStartBit}!void {
- try self.literal_stream_reader.init(bytes);
- }
-
- fn isLiteralStreamEmpty(self: *DecodeState) bool {
- switch (self.literal_streams) {
- .one => return self.literal_stream_reader.isEmpty(),
- .four => return self.literal_stream_index == 3 and self.literal_stream_reader.isEmpty(),
- }
- }
-
- const LiteralBitsError = error{
- BitStreamHasNoStartBit,
- UnexpectedEndOfLiteralStream,
- };
- fn readLiteralsBits(
- self: *DecodeState,
- bit_count_to_read: u16,
- ) LiteralBitsError!u16 {
- return self.literal_stream_reader.readBitsNoEof(u16, bit_count_to_read) catch bits: {
- if (self.literal_streams == .four and self.literal_stream_index < 3) {
- try self.nextLiteralMultiStream();
- break :bits self.literal_stream_reader.readBitsNoEof(u16, bit_count_to_read) catch
- return error.UnexpectedEndOfLiteralStream;
- } else {
- return error.UnexpectedEndOfLiteralStream;
- }
- };
- }
-
- const DecodeLiteralsError = error{
- MalformedLiteralsLength,
- NotFound,
- } || LiteralBitsError;
-
- /// Decode `len` bytes of literals into `dest`.
- ///
- /// Errors returned:
- /// - `error.MalformedLiteralsLength` if the number of literal bytes
- /// decoded by `self` plus `len` is greater than the regenerated size of
- /// `literals`
- /// - `error.UnexpectedEndOfLiteralStream` and `error.NotFound` if there
- /// are problems decoding Huffman compressed literals
- pub fn decodeLiteralsSlice(
- self: *DecodeState,
- dest: []u8,
- len: usize,
- ) DecodeLiteralsError!void {
- if (self.literal_written_count + len > self.literal_header.regenerated_size)
- return error.MalformedLiteralsLength;
-
- switch (self.literal_header.block_type) {
- .raw => {
- const literal_data = self.literal_streams.one[self.literal_written_count..][0..len];
- @memcpy(dest[0..len], literal_data);
- self.literal_written_count += len;
- self.written_count += len;
- },
- .rle => {
- for (0..len) |i| {
- dest[i] = self.literal_streams.one[0];
- }
- self.literal_written_count += len;
- self.written_count += len;
- },
- .compressed, .treeless => {
- // const written_bytes_per_stream = (literals.header.regenerated_size + 3) / 4;
- const huffman_tree = self.huffman_tree orelse unreachable;
- const max_bit_count = huffman_tree.max_bit_count;
- const starting_bit_count = LiteralsSection.HuffmanTree.weightToBitCount(
- huffman_tree.nodes[huffman_tree.symbol_count_minus_one].weight,
- max_bit_count,
- );
- var bits_read: u4 = 0;
- var huffman_tree_index: usize = huffman_tree.symbol_count_minus_one;
- var bit_count_to_read: u4 = starting_bit_count;
- for (0..len) |i| {
- var prefix: u16 = 0;
- while (true) {
- const new_bits = self.readLiteralsBits(bit_count_to_read) catch |err| {
- return err;
- };
- prefix <<= bit_count_to_read;
- prefix |= new_bits;
- bits_read += bit_count_to_read;
- const result = huffman_tree.query(huffman_tree_index, prefix) catch |err| {
- return err;
- };
-
- switch (result) {
- .symbol => |sym| {
- dest[i] = sym;
- bit_count_to_read = starting_bit_count;
- bits_read = 0;
- huffman_tree_index = huffman_tree.symbol_count_minus_one;
- break;
- },
- .index => |index| {
- huffman_tree_index = index;
- const bit_count = LiteralsSection.HuffmanTree.weightToBitCount(
- huffman_tree.nodes[index].weight,
- max_bit_count,
- );
- bit_count_to_read = bit_count - bits_read;
- },
- }
- }
- }
- self.literal_written_count += len;
- self.written_count += len;
- },
- }
- }
-
- /// Decode literals into `dest`; see `decodeLiteralsSlice()`.
- pub fn decodeLiteralsRingBuffer(
- self: *DecodeState,
- dest: *RingBuffer,
- len: usize,
- ) DecodeLiteralsError!void {
- if (self.literal_written_count + len > self.literal_header.regenerated_size)
- return error.MalformedLiteralsLength;
-
- switch (self.literal_header.block_type) {
- .raw => {
- const literals_end = self.literal_written_count + len;
- const literal_data = self.literal_streams.one[self.literal_written_count..literals_end];
- dest.writeSliceAssumeCapacity(literal_data);
- self.literal_written_count += len;
- self.written_count += len;
- },
- .rle => {
- for (0..len) |_| {
- dest.writeAssumeCapacity(self.literal_streams.one[0]);
- }
- self.literal_written_count += len;
- self.written_count += len;
- },
- .compressed, .treeless => {
- // const written_bytes_per_stream = (literals.header.regenerated_size + 3) / 4;
- const huffman_tree = self.huffman_tree orelse unreachable;
- const max_bit_count = huffman_tree.max_bit_count;
- const starting_bit_count = LiteralsSection.HuffmanTree.weightToBitCount(
- huffman_tree.nodes[huffman_tree.symbol_count_minus_one].weight,
- max_bit_count,
- );
- var bits_read: u4 = 0;
- var huffman_tree_index: usize = huffman_tree.symbol_count_minus_one;
- var bit_count_to_read: u4 = starting_bit_count;
- for (0..len) |_| {
- var prefix: u16 = 0;
- while (true) {
- const new_bits = try self.readLiteralsBits(bit_count_to_read);
- prefix <<= bit_count_to_read;
- prefix |= new_bits;
- bits_read += bit_count_to_read;
- const result = try huffman_tree.query(huffman_tree_index, prefix);
-
- switch (result) {
- .symbol => |sym| {
- dest.writeAssumeCapacity(sym);
- bit_count_to_read = starting_bit_count;
- bits_read = 0;
- huffman_tree_index = huffman_tree.symbol_count_minus_one;
- break;
- },
- .index => |index| {
- huffman_tree_index = index;
- const bit_count = LiteralsSection.HuffmanTree.weightToBitCount(
- huffman_tree.nodes[index].weight,
- max_bit_count,
- );
- bit_count_to_read = bit_count - bits_read;
- },
- }
- }
- }
- self.literal_written_count += len;
- self.written_count += len;
- },
- }
- }
-
- fn getCode(self: *DecodeState, comptime choice: DataType) u32 {
- return switch (@field(self, @tagName(choice)).table) {
- .rle => |value| value,
- .fse => |table| table[@field(self, @tagName(choice)).state].symbol,
- };
- }
-};
-
-/// Decode a single block from `src` into `dest`. The beginning of `src` must be
-/// the start of the block content (i.e. directly after the block header).
-/// Increments `consumed_count` by the number of bytes read from `src` to decode
-/// the block and returns the decompressed size of the block.
-///
-/// Errors returned:
-///
-/// - `error.BlockSizeOverMaximum` if block's size is larger than 1 << 17 or
-/// `dest[written_count..].len`
-/// - `error.MalformedBlockSize` if `src.len` is smaller than the block size
-/// and the block is a raw or compressed block
-/// - `error.ReservedBlock` if the block is a reserved block
-/// - `error.MalformedRleBlock` if the block is an RLE block and `src.len < 1`
-/// - `error.MalformedCompressedBlock` if there are errors decoding a
-/// compressed block
-/// - `error.DestTooSmall` is `dest` is not large enough to hold the
-/// decompressed block
-pub fn decodeBlock(
- dest: []u8,
- src: []const u8,
- block_header: frame.Zstandard.Block.Header,
- decode_state: *DecodeState,
- consumed_count: *usize,
- block_size_max: usize,
- written_count: usize,
-) (error{DestTooSmall} || Error)!usize {
- const block_size = block_header.block_size;
- if (block_size_max < block_size) return error.BlockSizeOverMaximum;
- switch (block_header.block_type) {
- .raw => {
- if (src.len < block_size) return error.MalformedBlockSize;
- if (dest[written_count..].len < block_size) return error.DestTooSmall;
- @memcpy(dest[written_count..][0..block_size], src[0..block_size]);
- consumed_count.* += block_size;
- decode_state.written_count += block_size;
- return block_size;
- },
- .rle => {
- if (src.len < 1) return error.MalformedRleBlock;
- if (dest[written_count..].len < block_size) return error.DestTooSmall;
- for (written_count..block_size + written_count) |write_pos| {
- dest[write_pos] = src[0];
- }
- consumed_count.* += 1;
- decode_state.written_count += block_size;
- return block_size;
- },
- .compressed => {
- if (src.len < block_size) return error.MalformedBlockSize;
- var bytes_read: usize = 0;
- const literals = decodeLiteralsSectionSlice(src[0..block_size], &bytes_read) catch
- return error.MalformedCompressedBlock;
- var fbs = std.io.fixedBufferStream(src[bytes_read..block_size]);
- const fbs_reader = fbs.reader();
- const sequences_header = decodeSequencesHeader(fbs_reader) catch
- return error.MalformedCompressedBlock;
-
- decode_state.prepare(fbs_reader, literals, sequences_header) catch
- return error.MalformedCompressedBlock;
-
- bytes_read += fbs.pos;
-
- var bytes_written: usize = 0;
- {
- const bit_stream_bytes = src[bytes_read..block_size];
- var bit_stream: readers.ReverseBitReader = undefined;
- bit_stream.init(bit_stream_bytes) catch return error.MalformedCompressedBlock;
-
- if (sequences_header.sequence_count > 0) {
- decode_state.readInitialFseState(&bit_stream) catch
- return error.MalformedCompressedBlock;
-
- var sequence_size_limit = block_size_max;
- for (0..sequences_header.sequence_count) |i| {
- const write_pos = written_count + bytes_written;
- const decompressed_size = decode_state.decodeSequenceSlice(
- dest,
- write_pos,
- &bit_stream,
- sequence_size_limit,
- i == sequences_header.sequence_count - 1,
- ) catch |err| switch (err) {
- error.DestTooSmall => return error.DestTooSmall,
- else => return error.MalformedCompressedBlock,
- };
- bytes_written += decompressed_size;
- sequence_size_limit -= decompressed_size;
- }
- }
-
- if (!bit_stream.isEmpty()) {
- return error.MalformedCompressedBlock;
- }
- }
-
- if (decode_state.literal_written_count < literals.header.regenerated_size) {
- const len = literals.header.regenerated_size - decode_state.literal_written_count;
- if (len > dest[written_count + bytes_written ..].len) return error.DestTooSmall;
- decode_state.decodeLiteralsSlice(dest[written_count + bytes_written ..], len) catch
- return error.MalformedCompressedBlock;
- bytes_written += len;
- }
-
- switch (decode_state.literal_header.block_type) {
- .treeless, .compressed => {
- if (!decode_state.isLiteralStreamEmpty()) return error.MalformedCompressedBlock;
- },
- .raw, .rle => {},
- }
-
- consumed_count.* += block_size;
- return bytes_written;
- },
- .reserved => return error.ReservedBlock,
- }
-}
-
-/// Decode a single block from `src` into `dest`; see `decodeBlock()`. Returns
-/// the size of the decompressed block, which can be used with `dest.sliceLast()`
-/// to get the decompressed bytes. `error.BlockSizeOverMaximum` is returned if
-/// the block's compressed or decompressed size is larger than `block_size_max`.
-pub fn decodeBlockRingBuffer(
- dest: *RingBuffer,
- src: []const u8,
- block_header: frame.Zstandard.Block.Header,
- decode_state: *DecodeState,
- consumed_count: *usize,
- block_size_max: usize,
-) Error!usize {
- const block_size = block_header.block_size;
- if (block_size_max < block_size) return error.BlockSizeOverMaximum;
- switch (block_header.block_type) {
- .raw => {
- if (src.len < block_size) return error.MalformedBlockSize;
- // dest may have length zero if block_size == 0, causing division by zero in
- // writeSliceAssumeCapacity()
- if (block_size > 0) {
- const data = src[0..block_size];
- dest.writeSliceAssumeCapacity(data);
- consumed_count.* += block_size;
- decode_state.written_count += block_size;
- }
- return block_size;
- },
- .rle => {
- if (src.len < 1) return error.MalformedRleBlock;
- for (0..block_size) |_| {
- dest.writeAssumeCapacity(src[0]);
- }
- consumed_count.* += 1;
- decode_state.written_count += block_size;
- return block_size;
- },
- .compressed => {
- if (src.len < block_size) return error.MalformedBlockSize;
- var bytes_read: usize = 0;
- const literals = decodeLiteralsSectionSlice(src[0..block_size], &bytes_read) catch
- return error.MalformedCompressedBlock;
- var fbs = std.io.fixedBufferStream(src[bytes_read..block_size]);
- const fbs_reader = fbs.reader();
- const sequences_header = decodeSequencesHeader(fbs_reader) catch
- return error.MalformedCompressedBlock;
-
- decode_state.prepare(fbs_reader, literals, sequences_header) catch
- return error.MalformedCompressedBlock;
-
- bytes_read += fbs.pos;
-
- var bytes_written: usize = 0;
- {
- const bit_stream_bytes = src[bytes_read..block_size];
- var bit_stream: readers.ReverseBitReader = undefined;
- bit_stream.init(bit_stream_bytes) catch return error.MalformedCompressedBlock;
-
- if (sequences_header.sequence_count > 0) {
- decode_state.readInitialFseState(&bit_stream) catch
- return error.MalformedCompressedBlock;
-
- var sequence_size_limit = block_size_max;
- for (0..sequences_header.sequence_count) |i| {
- const decompressed_size = decode_state.decodeSequenceRingBuffer(
- dest,
- &bit_stream,
- sequence_size_limit,
- i == sequences_header.sequence_count - 1,
- ) catch return error.MalformedCompressedBlock;
- bytes_written += decompressed_size;
- sequence_size_limit -= decompressed_size;
- }
- }
-
- if (!bit_stream.isEmpty()) {
- return error.MalformedCompressedBlock;
- }
- }
-
- if (decode_state.literal_written_count < literals.header.regenerated_size) {
- const len = literals.header.regenerated_size - decode_state.literal_written_count;
- decode_state.decodeLiteralsRingBuffer(dest, len) catch
- return error.MalformedCompressedBlock;
- bytes_written += len;
- }
-
- switch (decode_state.literal_header.block_type) {
- .treeless, .compressed => {
- if (!decode_state.isLiteralStreamEmpty()) return error.MalformedCompressedBlock;
- },
- .raw, .rle => {},
- }
-
- consumed_count.* += block_size;
- if (bytes_written > block_size_max) return error.BlockSizeOverMaximum;
- return bytes_written;
- },
- .reserved => return error.ReservedBlock,
- }
-}
-
-/// Decode a single block from `source` into `dest`. Literal and sequence data
-/// from the block is copied into `literals_buffer` and `sequence_buffer`, which
-/// must be large enough or `error.LiteralsBufferTooSmall` and
-/// `error.SequenceBufferTooSmall` are returned (the maximum block size is an
-/// upper bound for the size of both buffers). See `decodeBlock`
-/// and `decodeBlockRingBuffer` for function that can decode a block without
-/// these extra copies. `error.EndOfStream` is returned if `source` does not
-/// contain enough bytes.
-pub fn decodeBlockReader(
- dest: *RingBuffer,
- source: anytype,
- block_header: frame.Zstandard.Block.Header,
- decode_state: *DecodeState,
- block_size_max: usize,
- literals_buffer: []u8,
- sequence_buffer: []u8,
-) !void {
- const block_size = block_header.block_size;
- var block_reader_limited = std.io.limitedReader(source, block_size);
- const block_reader = block_reader_limited.reader();
- if (block_size_max < block_size) return error.BlockSizeOverMaximum;
- switch (block_header.block_type) {
- .raw => {
- if (block_size == 0) return;
- const slice = dest.sliceAt(dest.write_index, block_size);
- try source.readNoEof(slice.first);
- try source.readNoEof(slice.second);
- dest.write_index = dest.mask2(dest.write_index + block_size);
- decode_state.written_count += block_size;
- },
- .rle => {
- const byte = try source.readByte();
- for (0..block_size) |_| {
- dest.writeAssumeCapacity(byte);
- }
- decode_state.written_count += block_size;
- },
- .compressed => {
- const literals = try decodeLiteralsSection(block_reader, literals_buffer);
- const sequences_header = try decodeSequencesHeader(block_reader);
-
- try decode_state.prepare(block_reader, literals, sequences_header);
-
- var bytes_written: usize = 0;
- {
- const size = try block_reader.readAll(sequence_buffer);
- var bit_stream: readers.ReverseBitReader = undefined;
- try bit_stream.init(sequence_buffer[0..size]);
-
- if (sequences_header.sequence_count > 0) {
- if (sequence_buffer.len < block_reader_limited.bytes_left)
- return error.SequenceBufferTooSmall;
-
- decode_state.readInitialFseState(&bit_stream) catch
- return error.MalformedCompressedBlock;
-
- var sequence_size_limit = block_size_max;
- for (0..sequences_header.sequence_count) |i| {
- const decompressed_size = decode_state.decodeSequenceRingBuffer(
- dest,
- &bit_stream,
- sequence_size_limit,
- i == sequences_header.sequence_count - 1,
- ) catch return error.MalformedCompressedBlock;
- sequence_size_limit -= decompressed_size;
- bytes_written += decompressed_size;
- }
- }
-
- if (!bit_stream.isEmpty()) {
- return error.MalformedCompressedBlock;
- }
- }
-
- if (decode_state.literal_written_count < literals.header.regenerated_size) {
- const len = literals.header.regenerated_size - decode_state.literal_written_count;
- decode_state.decodeLiteralsRingBuffer(dest, len) catch
- return error.MalformedCompressedBlock;
- bytes_written += len;
- }
-
- switch (decode_state.literal_header.block_type) {
- .treeless, .compressed => {
- if (!decode_state.isLiteralStreamEmpty()) return error.MalformedCompressedBlock;
- },
- .raw, .rle => {},
- }
-
- if (bytes_written > block_size_max) return error.BlockSizeOverMaximum;
- if (block_reader_limited.bytes_left != 0) return error.MalformedCompressedBlock;
- decode_state.literal_written_count = 0;
- },
- .reserved => return error.ReservedBlock,
- }
-}
-
-/// Decode the header of a block.
-pub fn decodeBlockHeader(src: *const [3]u8) frame.Zstandard.Block.Header {
- const last_block = src[0] & 1 == 1;
- const block_type = @as(frame.Zstandard.Block.Type, @enumFromInt((src[0] & 0b110) >> 1));
- const block_size = ((src[0] & 0b11111000) >> 3) + (@as(u21, src[1]) << 5) + (@as(u21, src[2]) << 13);
- return .{
- .last_block = last_block,
- .block_type = block_type,
- .block_size = block_size,
- };
-}
-
-/// Decode the header of a block.
-///
-/// Errors returned:
-/// - `error.EndOfStream` if `src.len < 3`
-pub fn decodeBlockHeaderSlice(src: []const u8) error{EndOfStream}!frame.Zstandard.Block.Header {
- if (src.len < 3) return error.EndOfStream;
- return decodeBlockHeader(src[0..3]);
-}
-
-/// Decode a `LiteralsSection` from `src`, incrementing `consumed_count` by the
-/// number of bytes the section uses.
-///
-/// Errors returned:
-/// - `error.MalformedLiteralsHeader` if the header is invalid
-/// - `error.MalformedLiteralsSection` if there are decoding errors
-/// - `error.MalformedAccuracyLog` if compressed literals have invalid
-/// accuracy
-/// - `error.MalformedFseTable` if compressed literals have invalid FSE table
-/// - `error.MalformedHuffmanTree` if there are errors decoding a Huffamn tree
-/// - `error.EndOfStream` if there are not enough bytes in `src`
-pub fn decodeLiteralsSectionSlice(
- src: []const u8,
- consumed_count: *usize,
-) (error{ MalformedLiteralsHeader, MalformedLiteralsSection, EndOfStream } || huffman.Error)!LiteralsSection {
- var bytes_read: usize = 0;
- const header = header: {
- var fbs = std.io.fixedBufferStream(src);
- defer bytes_read = fbs.pos;
- break :header decodeLiteralsHeader(fbs.reader()) catch return error.MalformedLiteralsHeader;
- };
- switch (header.block_type) {
- .raw => {
- if (src.len < bytes_read + header.regenerated_size) return error.MalformedLiteralsSection;
- const stream = src[bytes_read..][0..header.regenerated_size];
- consumed_count.* += header.regenerated_size + bytes_read;
- return LiteralsSection{
- .header = header,
- .huffman_tree = null,
- .streams = .{ .one = stream },
- };
- },
- .rle => {
- if (src.len < bytes_read + 1) return error.MalformedLiteralsSection;
- const stream = src[bytes_read..][0..1];
- consumed_count.* += 1 + bytes_read;
- return LiteralsSection{
- .header = header,
- .huffman_tree = null,
- .streams = .{ .one = stream },
- };
- },
- .compressed, .treeless => {
- const huffman_tree_start = bytes_read;
- const huffman_tree = if (header.block_type == .compressed)
- try huffman.decodeHuffmanTreeSlice(src[bytes_read..], &bytes_read)
- else
- null;
- const huffman_tree_size = bytes_read - huffman_tree_start;
- const total_streams_size = std.math.sub(usize, header.compressed_size.?, huffman_tree_size) catch
- return error.MalformedLiteralsSection;
-
- if (src.len < bytes_read + total_streams_size) return error.MalformedLiteralsSection;
- const stream_data = src[bytes_read .. bytes_read + total_streams_size];
-
- const streams = try decodeStreams(header.size_format, stream_data);
- consumed_count.* += bytes_read + total_streams_size;
- return LiteralsSection{
- .header = header,
- .huffman_tree = huffman_tree,
- .streams = streams,
- };
- },
- }
-}
-
-/// Decode a `LiteralsSection` from `src`, incrementing `consumed_count` by the
-/// number of bytes the section uses. See `decodeLiterasSectionSlice()`.
-pub fn decodeLiteralsSection(
- source: anytype,
- buffer: []u8,
-) !LiteralsSection {
- const header = try decodeLiteralsHeader(source);
- switch (header.block_type) {
- .raw => {
- if (buffer.len < header.regenerated_size) return error.LiteralsBufferTooSmall;
- try source.readNoEof(buffer[0..header.regenerated_size]);
- return LiteralsSection{
- .header = header,
- .huffman_tree = null,
- .streams = .{ .one = buffer },
- };
- },
- .rle => {
- buffer[0] = try source.readByte();
- return LiteralsSection{
- .header = header,
- .huffman_tree = null,
- .streams = .{ .one = buffer[0..1] },
- };
- },
- .compressed, .treeless => {
- var counting_reader = std.io.countingReader(source);
- const huffman_tree = if (header.block_type == .compressed)
- try huffman.decodeHuffmanTree(counting_reader.reader(), buffer)
- else
- null;
- const huffman_tree_size = @as(usize, @intCast(counting_reader.bytes_read));
- const total_streams_size = std.math.sub(usize, header.compressed_size.?, huffman_tree_size) catch
- return error.MalformedLiteralsSection;
-
- if (total_streams_size > buffer.len) return error.LiteralsBufferTooSmall;
- try source.readNoEof(buffer[0..total_streams_size]);
- const stream_data = buffer[0..total_streams_size];
-
- const streams = try decodeStreams(header.size_format, stream_data);
- return LiteralsSection{
- .header = header,
- .huffman_tree = huffman_tree,
- .streams = streams,
- };
- },
- }
-}
-
-fn decodeStreams(size_format: u2, stream_data: []const u8) !LiteralsSection.Streams {
- if (size_format == 0) {
- return .{ .one = stream_data };
- }
-
- if (stream_data.len < 6) return error.MalformedLiteralsSection;
-
- const stream_1_length: usize = std.mem.readInt(u16, stream_data[0..2], .little);
- const stream_2_length: usize = std.mem.readInt(u16, stream_data[2..4], .little);
- const stream_3_length: usize = std.mem.readInt(u16, stream_data[4..6], .little);
-
- const stream_1_start = 6;
- const stream_2_start = stream_1_start + stream_1_length;
- const stream_3_start = stream_2_start + stream_2_length;
- const stream_4_start = stream_3_start + stream_3_length;
-
- if (stream_data.len < stream_4_start) return error.MalformedLiteralsSection;
-
- return .{ .four = .{
- stream_data[stream_1_start .. stream_1_start + stream_1_length],
- stream_data[stream_2_start .. stream_2_start + stream_2_length],
- stream_data[stream_3_start .. stream_3_start + stream_3_length],
- stream_data[stream_4_start..],
- } };
-}
-
-/// Decode a literals section header.
-///
-/// Errors returned:
-/// - `error.EndOfStream` if there are not enough bytes in `source`
-pub fn decodeLiteralsHeader(source: anytype) !LiteralsSection.Header {
- const byte0 = try source.readByte();
- const block_type = @as(LiteralsSection.BlockType, @enumFromInt(byte0 & 0b11));
- const size_format = @as(u2, @intCast((byte0 & 0b1100) >> 2));
- var regenerated_size: u20 = undefined;
- var compressed_size: ?u18 = null;
- switch (block_type) {
- .raw, .rle => {
- switch (size_format) {
- 0, 2 => {
- regenerated_size = byte0 >> 3;
- },
- 1 => regenerated_size = (byte0 >> 4) + (@as(u20, try source.readByte()) << 4),
- 3 => regenerated_size = (byte0 >> 4) +
- (@as(u20, try source.readByte()) << 4) +
- (@as(u20, try source.readByte()) << 12),
- }
- },
- .compressed, .treeless => {
- const byte1 = try source.readByte();
- const byte2 = try source.readByte();
- switch (size_format) {
- 0, 1 => {
- regenerated_size = (byte0 >> 4) + ((@as(u20, byte1) & 0b00111111) << 4);
- compressed_size = ((byte1 & 0b11000000) >> 6) + (@as(u18, byte2) << 2);
- },
- 2 => {
- const byte3 = try source.readByte();
- regenerated_size = (byte0 >> 4) + (@as(u20, byte1) << 4) + ((@as(u20, byte2) & 0b00000011) << 12);
- compressed_size = ((byte2 & 0b11111100) >> 2) + (@as(u18, byte3) << 6);
- },
- 3 => {
- const byte3 = try source.readByte();
- const byte4 = try source.readByte();
- regenerated_size = (byte0 >> 4) + (@as(u20, byte1) << 4) + ((@as(u20, byte2) & 0b00111111) << 12);
- compressed_size = ((byte2 & 0b11000000) >> 6) + (@as(u18, byte3) << 2) + (@as(u18, byte4) << 10);
- },
- }
- },
- }
- return LiteralsSection.Header{
- .block_type = block_type,
- .size_format = size_format,
- .regenerated_size = regenerated_size,
- .compressed_size = compressed_size,
- };
-}
-
-/// Decode a sequences section header.
-///
-/// Errors returned:
-/// - `error.ReservedBitSet` if the reserved bit is set
-/// - `error.EndOfStream` if there are not enough bytes in `source`
-pub fn decodeSequencesHeader(
- source: anytype,
-) !SequencesSection.Header {
- var sequence_count: u24 = undefined;
-
- const byte0 = try source.readByte();
- if (byte0 == 0) {
- return SequencesSection.Header{
- .sequence_count = 0,
- .offsets = undefined,
- .match_lengths = undefined,
- .literal_lengths = undefined,
- };
- } else if (byte0 < 128) {
- sequence_count = byte0;
- } else if (byte0 < 255) {
- sequence_count = (@as(u24, (byte0 - 128)) << 8) + try source.readByte();
- } else {
- sequence_count = (try source.readByte()) + (@as(u24, try source.readByte()) << 8) + 0x7F00;
- }
-
- const compression_modes = try source.readByte();
-
- const matches_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00001100) >> 2));
- const offsets_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00110000) >> 4));
- const literal_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b11000000) >> 6));
- if (compression_modes & 0b11 != 0) return error.ReservedBitSet;
-
- return SequencesSection.Header{
- .sequence_count = sequence_count,
- .offsets = offsets_mode,
- .match_lengths = matches_mode,
- .literal_lengths = literal_mode,
- };
-}
diff --git a/lib/std/compress/zstandard/decode/fse.zig b/lib/std/compress/zstandard/decode/fse.zig
deleted file mode 100644
index 6e987f9c6f..0000000000
--- a/lib/std/compress/zstandard/decode/fse.zig
+++ /dev/null
@@ -1,153 +0,0 @@
-const std = @import("std");
-const assert = std.debug.assert;
-
-const types = @import("../types.zig");
-const Table = types.compressed_block.Table;
-
-pub fn decodeFseTable(
- bit_reader: anytype,
- expected_symbol_count: usize,
- max_accuracy_log: u4,
- entries: []Table.Fse,
-) !usize {
- const accuracy_log_biased = try bit_reader.readBitsNoEof(u4, 4);
- if (accuracy_log_biased > max_accuracy_log -| 5) return error.MalformedAccuracyLog;
- const accuracy_log = accuracy_log_biased + 5;
-
- var values: [256]u16 = undefined;
- var value_count: usize = 0;
-
- const total_probability = @as(u16, 1) << accuracy_log;
- var accumulated_probability: u16 = 0;
-
- while (accumulated_probability < total_probability) {
- // WARNING: The RFC is poorly worded, and would suggest std.math.log2_int_ceil is correct here,
- // but power of two (remaining probabilities + 1) need max bits set to 1 more.
- const max_bits = std.math.log2_int(u16, total_probability - accumulated_probability + 1) + 1;
- const small = try bit_reader.readBitsNoEof(u16, max_bits - 1);
-
- const cutoff = (@as(u16, 1) << max_bits) - 1 - (total_probability - accumulated_probability + 1);
-
- const value = if (small < cutoff)
- small
- else value: {
- const value_read = small + (try bit_reader.readBitsNoEof(u16, 1) << (max_bits - 1));
- break :value if (value_read < @as(u16, 1) << (max_bits - 1))
- value_read
- else
- value_read - cutoff;
- };
-
- accumulated_probability += if (value != 0) value - 1 else 1;
-
- values[value_count] = value;
- value_count += 1;
-
- if (value == 1) {
- while (true) {
- const repeat_flag = try bit_reader.readBitsNoEof(u2, 2);
- if (repeat_flag + value_count > 256) return error.MalformedFseTable;
- for (0..repeat_flag) |_| {
- values[value_count] = 1;
- value_count += 1;
- }
- if (repeat_flag < 3) break;
- }
- }
- if (value_count == 256) break;
- }
- bit_reader.alignToByte();
-
- if (value_count < 2) return error.MalformedFseTable;
- if (accumulated_probability != total_probability) return error.MalformedFseTable;
- if (value_count > expected_symbol_count) return error.MalformedFseTable;
-
- const table_size = total_probability;
-
- try buildFseTable(values[0..value_count], entries[0..table_size]);
- return table_size;
-}
-
-fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
- const total_probability = @as(u16, @intCast(entries.len));
- const accuracy_log = std.math.log2_int(u16, total_probability);
- assert(total_probability <= 1 << 9);
-
- var less_than_one_count: usize = 0;
- for (values, 0..) |value, i| {
- if (value == 0) {
- entries[entries.len - 1 - less_than_one_count] = Table.Fse{
- .symbol = @as(u8, @intCast(i)),
- .baseline = 0,
- .bits = accuracy_log,
- };
- less_than_one_count += 1;
- }
- }
-
- var position: usize = 0;
- var temp_states: [1 << 9]u16 = undefined;
- for (values, 0..) |value, symbol| {
- if (value == 0 or value == 1) continue;
- const probability = value - 1;
-
- const state_share_dividend = std.math.ceilPowerOfTwo(u16, probability) catch
- return error.MalformedFseTable;
- const share_size = @divExact(total_probability, state_share_dividend);
- const double_state_count = state_share_dividend - probability;
- const single_state_count = probability - double_state_count;
- const share_size_log = std.math.log2_int(u16, share_size);
-
- for (0..probability) |i| {
- temp_states[i] = @as(u16, @intCast(position));
- position += (entries.len >> 1) + (entries.len >> 3) + 3;
- position &= entries.len - 1;
- while (position >= entries.len - less_than_one_count) {
- position += (entries.len >> 1) + (entries.len >> 3) + 3;
- position &= entries.len - 1;
- }
- }
- std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16));
- for (0..probability) |i| {
- entries[temp_states[i]] = if (i < double_state_count) Table.Fse{
- .symbol = @as(u8, @intCast(symbol)),
- .bits = share_size_log + 1,
- .baseline = single_state_count * share_size + @as(u16, @intCast(i)) * 2 * share_size,
- } else Table.Fse{
- .symbol = @as(u8, @intCast(symbol)),
- .bits = share_size_log,
- .baseline = (@as(u16, @intCast(i)) - double_state_count) * share_size,
- };
- }
- }
-}
-
-test buildFseTable {
- const literals_length_default_values = [36]u16{
- 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 2, 2, 2, 2, 2,
- 0, 0, 0, 0,
- };
-
- const match_lengths_default_values = [53]u16{
- 2, 5, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0,
- 0, 0, 0, 0, 0,
- };
-
- const offset_codes_default_values = [29]u16{
- 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0,
- };
-
- var entries: [64]Table.Fse = undefined;
- try buildFseTable(&literals_length_default_values, &entries);
- try std.testing.expectEqualSlices(Table.Fse, types.compressed_block.predefined_literal_fse_table.fse, &entries);
-
- try buildFseTable(&match_lengths_default_values, &entries);
- try std.testing.expectEqualSlices(Table.Fse, types.compressed_block.predefined_match_fse_table.fse, &entries);
-
- try buildFseTable(&offset_codes_default_values, entries[0..32]);
- try std.testing.expectEqualSlices(Table.Fse, types.compressed_block.predefined_offset_fse_table.fse, entries[0..32]);
-}
diff --git a/lib/std/compress/zstandard/decode/huffman.zig b/lib/std/compress/zstandard/decode/huffman.zig
deleted file mode 100644
index 4728ccd027..0000000000
--- a/lib/std/compress/zstandard/decode/huffman.zig
+++ /dev/null
@@ -1,234 +0,0 @@
-const std = @import("std");
-
-const types = @import("../types.zig");
-const LiteralsSection = types.compressed_block.LiteralsSection;
-const Table = types.compressed_block.Table;
-
-const readers = @import("../readers.zig");
-
-const decodeFseTable = @import("fse.zig").decodeFseTable;
-
-pub const Error = error{
- MalformedHuffmanTree,
- MalformedFseTable,
- MalformedAccuracyLog,
- EndOfStream,
-};
-
-fn decodeFseHuffmanTree(
- source: anytype,
- compressed_size: usize,
- buffer: []u8,
- weights: *[256]u4,
-) !usize {
- var stream = std.io.limitedReader(source, compressed_size);
- var bit_reader = readers.bitReader(stream.reader());
-
- var entries: [1 << 6]Table.Fse = undefined;
- const table_size = decodeFseTable(&bit_reader, 256, 6, &entries) catch |err| switch (err) {
- error.MalformedAccuracyLog, error.MalformedFseTable => |e| return e,
- error.EndOfStream => return error.MalformedFseTable,
- else => |e| return e,
- };
- const accuracy_log = std.math.log2_int_ceil(usize, table_size);
-
- const amount = try stream.reader().readAll(buffer);
- var huff_bits: readers.ReverseBitReader = undefined;
- huff_bits.init(buffer[0..amount]) catch return error.MalformedHuffmanTree;
-
- return assignWeights(&huff_bits, accuracy_log, &entries, weights);
-}
-
-fn decodeFseHuffmanTreeSlice(src: []const u8, compressed_size: usize, weights: *[256]u4) !usize {
- if (src.len < compressed_size) return error.MalformedHuffmanTree;
- var stream = std.io.fixedBufferStream(src[0..compressed_size]);
- var counting_reader = std.io.countingReader(stream.reader());
- var bit_reader = readers.bitReader(counting_reader.reader());
-
- var entries: [1 << 6]Table.Fse = undefined;
- const table_size = decodeFseTable(&bit_reader, 256, 6, &entries) catch |err| switch (err) {
- error.MalformedAccuracyLog, error.MalformedFseTable => |e| return e,
- error.EndOfStream => return error.MalformedFseTable,
- };
- const accuracy_log = std.math.log2_int_ceil(usize, table_size);
-
- const start_index = std.math.cast(usize, counting_reader.bytes_read) orelse
- return error.MalformedHuffmanTree;
- const huff_data = src[start_index..compressed_size];
- var huff_bits: readers.ReverseBitReader = undefined;
- huff_bits.init(huff_data) catch return error.MalformedHuffmanTree;
-
- return assignWeights(&huff_bits, accuracy_log, &entries, weights);
-}
-
-fn assignWeights(
- huff_bits: *readers.ReverseBitReader,
- accuracy_log: u16,
- entries: *[1 << 6]Table.Fse,
- weights: *[256]u4,
-) !usize {
- var i: usize = 0;
- var even_state: u32 = huff_bits.readBitsNoEof(u32, accuracy_log) catch return error.MalformedHuffmanTree;
- var odd_state: u32 = huff_bits.readBitsNoEof(u32, accuracy_log) catch return error.MalformedHuffmanTree;
-
- while (i < 254) {
- const even_data = entries[even_state];
- var read_bits: u16 = 0;
- const even_bits = huff_bits.readBits(u32, even_data.bits, &read_bits) catch unreachable;
- weights[i] = std.math.cast(u4, even_data.symbol) orelse return error.MalformedHuffmanTree;
- i += 1;
- if (read_bits < even_data.bits) {
- weights[i] = std.math.cast(u4, entries[odd_state].symbol) orelse return error.MalformedHuffmanTree;
- i += 1;
- break;
- }
- even_state = even_data.baseline + even_bits;
-
- read_bits = 0;
- const odd_data = entries[odd_state];
- const odd_bits = huff_bits.readBits(u32, odd_data.bits, &read_bits) catch unreachable;
- weights[i] = std.math.cast(u4, odd_data.symbol) orelse return error.MalformedHuffmanTree;
- i += 1;
- if (read_bits < odd_data.bits) {
- if (i == 255) return error.MalformedHuffmanTree;
- weights[i] = std.math.cast(u4, entries[even_state].symbol) orelse return error.MalformedHuffmanTree;
- i += 1;
- break;
- }
- odd_state = odd_data.baseline + odd_bits;
- } else return error.MalformedHuffmanTree;
-
- if (!huff_bits.isEmpty()) {
- return error.MalformedHuffmanTree;
- }
-
- return i + 1; // stream contains all but the last symbol
-}
-
-fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights: *[256]u4) !usize {
- const weights_byte_count = (encoded_symbol_count + 1) / 2;
- for (0..weights_byte_count) |i| {
- const byte = try source.readByte();
- weights[2 * i] = @as(u4, @intCast(byte >> 4));
- weights[2 * i + 1] = @as(u4, @intCast(byte & 0xF));
- }
- return encoded_symbol_count + 1;
-}
-
-fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.PrefixedSymbol, weights: [256]u4) usize {
- for (0..weight_sorted_prefixed_symbols.len) |i| {
- weight_sorted_prefixed_symbols[i] = .{
- .symbol = @as(u8, @intCast(i)),
- .weight = undefined,
- .prefix = undefined,
- };
- }
-
- std.mem.sort(
- LiteralsSection.HuffmanTree.PrefixedSymbol,
- weight_sorted_prefixed_symbols,
- weights,
- lessThanByWeight,
- );
-
- var prefix: u16 = 0;
- var prefixed_symbol_count: usize = 0;
- var sorted_index: usize = 0;
- const symbol_count = weight_sorted_prefixed_symbols.len;
- while (sorted_index < symbol_count) {
- var symbol = weight_sorted_prefixed_symbols[sorted_index].symbol;
- const weight = weights[symbol];
- if (weight == 0) {
- sorted_index += 1;
- continue;
- }
-
- while (sorted_index < symbol_count) : ({
- sorted_index += 1;
- prefixed_symbol_count += 1;
- prefix += 1;
- }) {
- symbol = weight_sorted_prefixed_symbols[sorted_index].symbol;
- if (weights[symbol] != weight) {
- prefix = ((prefix - 1) >> (weights[symbol] - weight)) + 1;
- break;
- }
- weight_sorted_prefixed_symbols[prefixed_symbol_count].symbol = symbol;
- weight_sorted_prefixed_symbols[prefixed_symbol_count].prefix = prefix;
- weight_sorted_prefixed_symbols[prefixed_symbol_count].weight = weight;
- }
- }
- return prefixed_symbol_count;
-}
-
-fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffmanTree}!LiteralsSection.HuffmanTree {
- var weight_power_sum_big: u32 = 0;
- for (weights[0 .. symbol_count - 1]) |value| {
- weight_power_sum_big += (@as(u16, 1) << value) >> 1;
- }
- if (weight_power_sum_big >= 1 << 11) return error.MalformedHuffmanTree;
- const weight_power_sum = @as(u16, @intCast(weight_power_sum_big));
-
- // advance to next power of two (even if weight_power_sum is a power of 2)
- // TODO: is it valid to have weight_power_sum == 0?
- const max_number_of_bits = if (weight_power_sum == 0) 1 else std.math.log2_int(u16, weight_power_sum) + 1;
- const next_power_of_two = @as(u16, 1) << max_number_of_bits;
- weights[symbol_count - 1] = std.math.log2_int(u16, next_power_of_two - weight_power_sum) + 1;
-
- var weight_sorted_prefixed_symbols: [256]LiteralsSection.HuffmanTree.PrefixedSymbol = undefined;
- const prefixed_symbol_count = assignSymbols(weight_sorted_prefixed_symbols[0..symbol_count], weights.*);
- const tree = LiteralsSection.HuffmanTree{
- .max_bit_count = max_number_of_bits,
- .symbol_count_minus_one = @as(u8, @intCast(prefixed_symbol_count - 1)),
- .nodes = weight_sorted_prefixed_symbols,
- };
- return tree;
-}
-
-pub fn decodeHuffmanTree(
- source: anytype,
- buffer: []u8,
-) (@TypeOf(source).Error || Error)!LiteralsSection.HuffmanTree {
- const header = try source.readByte();
- var weights: [256]u4 = undefined;
- const symbol_count = if (header < 128)
- // FSE compressed weights
- try decodeFseHuffmanTree(source, header, buffer, &weights)
- else
- try decodeDirectHuffmanTree(source, header - 127, &weights);
-
- return buildHuffmanTree(&weights, symbol_count);
-}
-
-pub fn decodeHuffmanTreeSlice(
- src: []const u8,
- consumed_count: *usize,
-) Error!LiteralsSection.HuffmanTree {
- if (src.len == 0) return error.MalformedHuffmanTree;
- const header = src[0];
- var bytes_read: usize = 1;
- var weights: [256]u4 = undefined;
- const symbol_count = if (header < 128) count: {
- // FSE compressed weights
- bytes_read += header;
- break :count try decodeFseHuffmanTreeSlice(src[1..], header, &weights);
- } else count: {
- var fbs = std.io.fixedBufferStream(src[1..]);
- defer bytes_read += fbs.pos;
- break :count try decodeDirectHuffmanTree(fbs.reader(), header - 127, &weights);
- };
-
- consumed_count.* += bytes_read;
- return buildHuffmanTree(&weights, symbol_count);
-}
-
-fn lessThanByWeight(
- weights: [256]u4,
- lhs: LiteralsSection.HuffmanTree.PrefixedSymbol,
- rhs: LiteralsSection.HuffmanTree.PrefixedSymbol,
-) bool {
- // NOTE: this function relies on the use of a stable sorting algorithm,
- // otherwise a special case of if (weights[lhs] == weights[rhs]) return lhs < rhs;
- // should be added
- return weights[lhs.symbol] < weights[rhs.symbol];
-}
diff --git a/lib/std/compress/zstandard/decompress.zig b/lib/std/compress/zstandard/decompress.zig
deleted file mode 100644
index adc7b89749..0000000000
--- a/lib/std/compress/zstandard/decompress.zig
+++ /dev/null
@@ -1,633 +0,0 @@
-const std = @import("std");
-const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const RingBuffer = std.RingBuffer;
-
-const types = @import("types.zig");
-const frame = types.frame;
-const LiteralsSection = types.compressed_block.LiteralsSection;
-const SequencesSection = types.compressed_block.SequencesSection;
-const SkippableHeader = types.frame.Skippable.Header;
-const ZstandardHeader = types.frame.Zstandard.Header;
-const Table = types.compressed_block.Table;
-
-pub const block = @import("decode/block.zig");
-
-const readers = @import("readers.zig");
-
-/// Returns `true` is `magic` is a valid magic number for a skippable frame
-pub fn isSkippableMagic(magic: u32) bool {
- return frame.Skippable.magic_number_min <= magic and magic <= frame.Skippable.magic_number_max;
-}
-
-/// Returns the kind of frame at the beginning of `source`.
-///
-/// Errors returned:
-/// - `error.BadMagic` if `source` begins with bytes not equal to the
-/// Zstandard frame magic number, or outside the range of magic numbers for
-/// skippable frames.
-/// - `error.EndOfStream` if `source` contains fewer than 4 bytes
-pub fn decodeFrameType(source: anytype) error{ BadMagic, EndOfStream }!frame.Kind {
- const magic = try source.readInt(u32, .little);
- return frameType(magic);
-}
-
-/// Returns the kind of frame associated to `magic`.
-///
-/// Errors returned:
-/// - `error.BadMagic` if `magic` is not a valid magic number.
-pub fn frameType(magic: u32) error{BadMagic}!frame.Kind {
- return if (magic == frame.Zstandard.magic_number)
- .zstandard
- else if (isSkippableMagic(magic))
- .skippable
- else
- error.BadMagic;
-}
-
-pub const FrameHeader = union(enum) {
- zstandard: ZstandardHeader,
- skippable: SkippableHeader,
-};
-
-pub const HeaderError = error{ BadMagic, EndOfStream, ReservedBitSet };
-
-/// Returns the header of the frame at the beginning of `source`.
-///
-/// Errors returned:
-/// - `error.BadMagic` if `source` begins with bytes not equal to the
-/// Zstandard frame magic number, or outside the range of magic numbers for
-/// skippable frames.
-/// - `error.EndOfStream` if `source` contains fewer than 4 bytes
-/// - `error.ReservedBitSet` if the frame is a Zstandard frame and any of the
-/// reserved bits are set
-pub fn decodeFrameHeader(source: anytype) (@TypeOf(source).Error || HeaderError)!FrameHeader {
- const magic = try source.readInt(u32, .little);
- const frame_type = try frameType(magic);
- switch (frame_type) {
- .zstandard => return FrameHeader{ .zstandard = try decodeZstandardHeader(source) },
- .skippable => return FrameHeader{
- .skippable = .{
- .magic_number = magic,
- .frame_size = try source.readInt(u32, .little),
- },
- },
- }
-}
-
-pub const ReadWriteCount = struct {
- read_count: usize,
- write_count: usize,
-};
-
-/// Decodes frames from `src` into `dest`; returns the length of the result.
-/// The stream should not have extra trailing bytes - either all bytes in `src`
-/// will be decoded, or an error will be returned. An error will be returned if
-/// a Zstandard frame in `src` does not declare its content size.
-///
-/// Errors returned:
-/// - `error.DictionaryIdFlagUnsupported` if a `src` contains a frame that
-/// uses a dictionary
-/// - `error.MalformedFrame` if a frame in `src` is invalid
-/// - `error.UnknownContentSizeUnsupported` if a frame in `src` does not
-/// declare its content size
-pub fn decode(dest: []u8, src: []const u8, verify_checksum: bool) error{
- MalformedFrame,
- UnknownContentSizeUnsupported,
- DictionaryIdFlagUnsupported,
-}!usize {
- var write_count: usize = 0;
- var read_count: usize = 0;
- while (read_count < src.len) {
- const counts = decodeFrame(dest, src[read_count..], verify_checksum) catch |err| {
- switch (err) {
- error.UnknownContentSizeUnsupported => return error.UnknownContentSizeUnsupported,
- error.DictionaryIdFlagUnsupported => return error.DictionaryIdFlagUnsupported,
- else => return error.MalformedFrame,
- }
- };
- read_count += counts.read_count;
- write_count += counts.write_count;
- }
- return write_count;
-}
-
-/// Decodes a stream of frames from `src`; returns the decoded bytes. The stream
-/// should not have extra trailing bytes - either all bytes in `src` will be
-/// decoded, or an error will be returned.
-///
-/// Errors returned:
-/// - `error.DictionaryIdFlagUnsupported` if a `src` contains a frame that
-/// uses a dictionary
-/// - `error.MalformedFrame` if a frame in `src` is invalid
-/// - `error.OutOfMemory` if `allocator` cannot allocate enough memory
-pub fn decodeAlloc(
- allocator: Allocator,
- src: []const u8,
- verify_checksum: bool,
- window_size_max: usize,
-) error{ DictionaryIdFlagUnsupported, MalformedFrame, OutOfMemory }![]u8 {
- var result = std.ArrayList(u8).init(allocator);
- errdefer result.deinit();
-
- var read_count: usize = 0;
- while (read_count < src.len) {
- read_count += decodeFrameArrayList(
- allocator,
- &result,
- src[read_count..],
- verify_checksum,
- window_size_max,
- ) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.DictionaryIdFlagUnsupported => return error.DictionaryIdFlagUnsupported,
- else => return error.MalformedFrame,
- };
- }
- return result.toOwnedSlice();
-}
-
-/// Decodes the frame at the start of `src` into `dest`. Returns the number of
-/// bytes read from `src` and written to `dest`. This function can only decode
-/// frames that declare the decompressed content size.
-///
-/// Errors returned:
-/// - `error.BadMagic` if the first 4 bytes of `src` is not a valid magic
-/// number for a Zstandard or skippable frame
-/// - `error.UnknownContentSizeUnsupported` if the frame does not declare the
-/// uncompressed content size
-/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
-/// - `error.ContentTooLarge` if `dest` is smaller than the uncompressed data
-/// size declared by the frame header
-/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
-/// that is larger than `std.math.maxInt(usize)`
-/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
-/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
-/// contains a checksum that does not match the checksum of the decompressed
-/// data
-/// - `error.ReservedBitSet` if any of the reserved bits of the frame header
-/// are set
-/// - `error.EndOfStream` if `src` does not contain a complete frame
-/// - `error.BadContentSize` if the content size declared by the frame does
-/// not equal the actual size of decompressed data
-/// - an error in `block.Error` if there are errors decoding a block
-/// - `error.SkippableSizeTooLarge` if the frame is skippable and reports a
-/// size greater than `src.len`
-pub fn decodeFrame(
- dest: []u8,
- src: []const u8,
- verify_checksum: bool,
-) (error{
- BadMagic,
- UnknownContentSizeUnsupported,
- ContentTooLarge,
- ContentSizeTooLarge,
- WindowSizeUnknown,
- DictionaryIdFlagUnsupported,
- SkippableSizeTooLarge,
-} || FrameError)!ReadWriteCount {
- var fbs = std.io.fixedBufferStream(src);
- switch (try decodeFrameType(fbs.reader())) {
- .zstandard => return decodeZstandardFrame(dest, src, verify_checksum),
- .skippable => {
- const content_size = try fbs.reader().readInt(u32, .little);
- if (content_size > std.math.maxInt(usize) - 8) return error.SkippableSizeTooLarge;
- const read_count = @as(usize, content_size) + 8;
- if (read_count > src.len) return error.SkippableSizeTooLarge;
- return ReadWriteCount{
- .read_count = read_count,
- .write_count = 0,
- };
- },
- }
-}
-
-/// Decodes the frame at the start of `src` into `dest`. Returns the number of
-/// bytes read from `src`.
-///
-/// Errors returned:
-/// - `error.BadMagic` if the first 4 bytes of `src` is not a valid magic
-/// number for a Zstandard or skippable frame
-/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
-/// - `error.WindowTooLarge` if the window size is larger than
-/// `window_size_max`
-/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
-/// that is larger than `std.math.maxInt(usize)`
-/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
-/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
-/// contains a checksum that does not match the checksum of the decompressed
-/// data
-/// - `error.ReservedBitSet` if any of the reserved bits of the frame header
-/// are set
-/// - `error.EndOfStream` if `src` does not contain a complete frame
-/// - `error.BadContentSize` if the content size declared by the frame does
-/// not equal the actual size of decompressed data
-/// - `error.OutOfMemory` if `allocator` cannot allocate enough memory
-/// - an error in `block.Error` if there are errors decoding a block
-/// - `error.SkippableSizeTooLarge` if the frame is skippable and reports a
-/// size greater than `src.len`
-pub fn decodeFrameArrayList(
- allocator: Allocator,
- dest: *std.ArrayList(u8),
- src: []const u8,
- verify_checksum: bool,
- window_size_max: usize,
-) (error{ BadMagic, OutOfMemory, SkippableSizeTooLarge } || FrameContext.Error || FrameError)!usize {
- var fbs = std.io.fixedBufferStream(src);
- const reader = fbs.reader();
- const magic = try reader.readInt(u32, .little);
- switch (try frameType(magic)) {
- .zstandard => return decodeZstandardFrameArrayList(
- allocator,
- dest,
- src,
- verify_checksum,
- window_size_max,
- ),
- .skippable => {
- const content_size = try fbs.reader().readInt(u32, .little);
- if (content_size > std.math.maxInt(usize) - 8) return error.SkippableSizeTooLarge;
- const read_count = @as(usize, content_size) + 8;
- if (read_count > src.len) return error.SkippableSizeTooLarge;
- return read_count;
- },
- }
-}
-
-/// Returns the frame checksum corresponding to the data fed into `hasher`
-pub fn computeChecksum(hasher: *std.hash.XxHash64) u32 {
- const hash = hasher.final();
- return @as(u32, @intCast(hash & 0xFFFFFFFF));
-}
-
-const FrameError = error{
- ChecksumFailure,
- BadContentSize,
- EndOfStream,
- ReservedBitSet,
-} || block.Error;
-
-/// Decode a Zstandard frame from `src` into `dest`, returning the number of
-/// bytes read from `src` and written to `dest`. The first four bytes of `src`
-/// must be the magic number for a Zstandard frame.
-///
-/// Error returned:
-/// - `error.UnknownContentSizeUnsupported` if the frame does not declare the
-/// uncompressed content size
-/// - `error.ContentTooLarge` if `dest` is smaller than the uncompressed data
-/// size declared by the frame header
-/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
-/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
-/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
-/// that is larger than `std.math.maxInt(usize)`
-/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
-/// contains a checksum that does not match the checksum of the decompressed
-/// data
-/// - `error.ReservedBitSet` if the reserved bit of the frame header is set
-/// - `error.EndOfStream` if `src` does not contain a complete frame
-/// - an error in `block.Error` if there are errors decoding a block
-/// - `error.BadContentSize` if the content size declared by the frame does
-/// not equal the actual size of decompressed data
-pub fn decodeZstandardFrame(
- dest: []u8,
- src: []const u8,
- verify_checksum: bool,
-) (error{
- UnknownContentSizeUnsupported,
- ContentTooLarge,
- ContentSizeTooLarge,
- WindowSizeUnknown,
- DictionaryIdFlagUnsupported,
-} || FrameError)!ReadWriteCount {
- assert(std.mem.readInt(u32, src[0..4], .little) == frame.Zstandard.magic_number);
- var consumed_count: usize = 4;
-
- var frame_context = context: {
- var fbs = std.io.fixedBufferStream(src[consumed_count..]);
- const source = fbs.reader();
- const frame_header = try decodeZstandardHeader(source);
- consumed_count += fbs.pos;
- break :context FrameContext.init(
- frame_header,
- std.math.maxInt(usize),
- verify_checksum,
- ) catch |err| switch (err) {
- error.WindowTooLarge => unreachable,
- inline else => |e| return e,
- };
- };
- const counts = try decodeZStandardFrameBlocks(
- dest,
- src[consumed_count..],
- &frame_context,
- );
- return ReadWriteCount{
- .read_count = counts.read_count + consumed_count,
- .write_count = counts.write_count,
- };
-}
-
-pub fn decodeZStandardFrameBlocks(
- dest: []u8,
- src: []const u8,
- frame_context: *FrameContext,
-) (error{ ContentTooLarge, UnknownContentSizeUnsupported } || FrameError)!ReadWriteCount {
- const content_size = frame_context.content_size orelse
- return error.UnknownContentSizeUnsupported;
- if (dest.len < content_size) return error.ContentTooLarge;
-
- var consumed_count: usize = 0;
- const written_count = decodeFrameBlocksInner(
- dest[0..content_size],
- src[consumed_count..],
- &consumed_count,
- if (frame_context.hasher_opt) |*hasher| hasher else null,
- frame_context.block_size_max,
- ) catch |err| switch (err) {
- error.DestTooSmall => return error.BadContentSize,
- inline else => |e| return e,
- };
-
- if (written_count != content_size) return error.BadContentSize;
- if (frame_context.has_checksum) {
- if (src.len < consumed_count + 4) return error.EndOfStream;
- const checksum = std.mem.readInt(u32, src[consumed_count..][0..4], .little);
- consumed_count += 4;
- if (frame_context.hasher_opt) |*hasher| {
- if (checksum != computeChecksum(hasher)) return error.ChecksumFailure;
- }
- }
- return ReadWriteCount{ .read_count = consumed_count, .write_count = written_count };
-}
-
-pub const FrameContext = struct {
- hasher_opt: ?std.hash.XxHash64,
- window_size: usize,
- has_checksum: bool,
- block_size_max: usize,
- content_size: ?usize,
-
- const Error = error{
- DictionaryIdFlagUnsupported,
- WindowSizeUnknown,
- WindowTooLarge,
- ContentSizeTooLarge,
- };
- /// Validates `frame_header` and returns the associated `FrameContext`.
- ///
- /// Errors returned:
- /// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
- /// - `error.WindowSizeUnknown` if the frame does not have a valid window
- /// size
- /// - `error.WindowTooLarge` if the window size is larger than
- /// `window_size_max` or `std.math.intMax(usize)`
- /// - `error.ContentSizeTooLarge` if the frame header indicates a content
- /// size larger than `std.math.maxInt(usize)`
- pub fn init(
- frame_header: ZstandardHeader,
- window_size_max: usize,
- verify_checksum: bool,
- ) Error!FrameContext {
- if (frame_header.descriptor.dictionary_id_flag != 0)
- return error.DictionaryIdFlagUnsupported;
-
- const window_size_raw = frameWindowSize(frame_header) orelse return error.WindowSizeUnknown;
- const window_size = if (window_size_raw > window_size_max)
- return error.WindowTooLarge
- else
- std.math.cast(usize, window_size_raw) orelse return error.WindowTooLarge;
-
- const should_compute_checksum =
- frame_header.descriptor.content_checksum_flag and verify_checksum;
-
- const content_size = if (frame_header.content_size) |size|
- std.math.cast(usize, size) orelse return error.ContentSizeTooLarge
- else
- null;
-
- return .{
- .hasher_opt = if (should_compute_checksum) std.hash.XxHash64.init(0) else null,
- .window_size = window_size,
- .has_checksum = frame_header.descriptor.content_checksum_flag,
- .block_size_max = @min(types.block_size_max, window_size),
- .content_size = content_size,
- };
- }
-};
-
-/// Decode a Zstandard from from `src` and return number of bytes read; see
-/// `decodeZstandardFrame()`. The first four bytes of `src` must be the magic
-/// number for a Zstandard frame.
-///
-/// Errors returned:
-/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
-/// - `error.WindowTooLarge` if the window size is larger than
-/// `window_size_max`
-/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
-/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
-/// that is larger than `std.math.maxInt(usize)`
-/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
-/// contains a checksum that does not match the checksum of the decompressed
-/// data
-/// - `error.ReservedBitSet` if the reserved bit of the frame header is set
-/// - `error.EndOfStream` if `src` does not contain a complete frame
-/// - `error.OutOfMemory` if `allocator` cannot allocate enough memory
-/// - an error in `block.Error` if there are errors decoding a block
-/// - `error.BadContentSize` if the content size declared by the frame does
-/// not equal the size of decompressed data
-pub fn decodeZstandardFrameArrayList(
- allocator: Allocator,
- dest: *std.ArrayList(u8),
- src: []const u8,
- verify_checksum: bool,
- window_size_max: usize,
-) (error{OutOfMemory} || FrameContext.Error || FrameError)!usize {
- assert(std.mem.readInt(u32, src[0..4], .little) == frame.Zstandard.magic_number);
- var consumed_count: usize = 4;
-
- var frame_context = context: {
- var fbs = std.io.fixedBufferStream(src[consumed_count..]);
- const source = fbs.reader();
- const frame_header = try decodeZstandardHeader(source);
- consumed_count += fbs.pos;
- break :context try FrameContext.init(frame_header, window_size_max, verify_checksum);
- };
-
- consumed_count += try decodeZstandardFrameBlocksArrayList(
- allocator,
- dest,
- src[consumed_count..],
- &frame_context,
- );
- return consumed_count;
-}
-
-pub fn decodeZstandardFrameBlocksArrayList(
- allocator: Allocator,
- dest: *std.ArrayList(u8),
- src: []const u8,
- frame_context: *FrameContext,
-) (error{OutOfMemory} || FrameError)!usize {
- const initial_len = dest.items.len;
-
- var ring_buffer = try RingBuffer.init(allocator, frame_context.window_size);
- defer ring_buffer.deinit(allocator);
-
- // These tables take 7680 bytes
- var literal_fse_data: [types.compressed_block.table_size_max.literal]Table.Fse = undefined;
- var match_fse_data: [types.compressed_block.table_size_max.match]Table.Fse = undefined;
- var offset_fse_data: [types.compressed_block.table_size_max.offset]Table.Fse = undefined;
-
- var block_header = try block.decodeBlockHeaderSlice(src);
- var consumed_count: usize = 3;
- var decode_state = block.DecodeState.init(&literal_fse_data, &match_fse_data, &offset_fse_data);
- while (true) : ({
- block_header = try block.decodeBlockHeaderSlice(src[consumed_count..]);
- consumed_count += 3;
- }) {
- const written_size = try block.decodeBlockRingBuffer(
- &ring_buffer,
- src[consumed_count..],
- block_header,
- &decode_state,
- &consumed_count,
- frame_context.block_size_max,
- );
- if (frame_context.content_size) |size| {
- if (dest.items.len - initial_len > size) {
- return error.BadContentSize;
- }
- }
- if (written_size > 0) {
- const written_slice = ring_buffer.sliceLast(written_size);
- try dest.appendSlice(written_slice.first);
- try dest.appendSlice(written_slice.second);
- if (frame_context.hasher_opt) |*hasher| {
- hasher.update(written_slice.first);
- hasher.update(written_slice.second);
- }
- }
- if (block_header.last_block) break;
- }
- if (frame_context.content_size) |size| {
- if (dest.items.len - initial_len != size) {
- return error.BadContentSize;
- }
- }
-
- if (frame_context.has_checksum) {
- if (src.len < consumed_count + 4) return error.EndOfStream;
- const checksum = std.mem.readInt(u32, src[consumed_count..][0..4], .little);
- consumed_count += 4;
- if (frame_context.hasher_opt) |*hasher| {
- if (checksum != computeChecksum(hasher)) return error.ChecksumFailure;
- }
- }
- return consumed_count;
-}
-
-fn decodeFrameBlocksInner(
- dest: []u8,
- src: []const u8,
- consumed_count: *usize,
- hash: ?*std.hash.XxHash64,
- block_size_max: usize,
-) (error{ EndOfStream, DestTooSmall } || block.Error)!usize {
- // These tables take 7680 bytes
- var literal_fse_data: [types.compressed_block.table_size_max.literal]Table.Fse = undefined;
- var match_fse_data: [types.compressed_block.table_size_max.match]Table.Fse = undefined;
- var offset_fse_data: [types.compressed_block.table_size_max.offset]Table.Fse = undefined;
-
- var block_header = try block.decodeBlockHeaderSlice(src);
- var bytes_read: usize = 3;
- defer consumed_count.* += bytes_read;
- var decode_state = block.DecodeState.init(&literal_fse_data, &match_fse_data, &offset_fse_data);
- var count: usize = 0;
- while (true) : ({
- block_header = try block.decodeBlockHeaderSlice(src[bytes_read..]);
- bytes_read += 3;
- }) {
- const written_size = try block.decodeBlock(
- dest,
- src[bytes_read..],
- block_header,
- &decode_state,
- &bytes_read,
- block_size_max,
- count,
- );
- if (hash) |hash_state| hash_state.update(dest[count .. count + written_size]);
- count += written_size;
- if (block_header.last_block) break;
- }
- return count;
-}
-
-/// Decode the header of a skippable frame. The first four bytes of `src` must
-/// be a valid magic number for a skippable frame.
-pub fn decodeSkippableHeader(src: *const [8]u8) SkippableHeader {
- const magic = std.mem.readInt(u32, src[0..4], .little);
- assert(isSkippableMagic(magic));
- const frame_size = std.mem.readInt(u32, src[4..8], .little);
- return .{
- .magic_number = magic,
- .frame_size = frame_size,
- };
-}
-
-/// Returns the window size required to decompress a frame, or `null` if it
-/// cannot be determined (which indicates a malformed frame header).
-pub fn frameWindowSize(header: ZstandardHeader) ?u64 {
- if (header.window_descriptor) |descriptor| {
- const exponent = (descriptor & 0b11111000) >> 3;
- const mantissa = descriptor & 0b00000111;
- const window_log = 10 + exponent;
- const window_base = @as(u64, 1) << @as(u6, @intCast(window_log));
- const window_add = (window_base / 8) * mantissa;
- return window_base + window_add;
- } else return header.content_size;
-}
-
-/// Decode the header of a Zstandard frame.
-///
-/// Errors returned:
-/// - `error.ReservedBitSet` if any of the reserved bits of the header are set
-/// - `error.EndOfStream` if `source` does not contain a complete header
-pub fn decodeZstandardHeader(
- source: anytype,
-) (@TypeOf(source).Error || error{ EndOfStream, ReservedBitSet })!ZstandardHeader {
- const descriptor = @as(ZstandardHeader.Descriptor, @bitCast(try source.readByte()));
-
- if (descriptor.reserved) return error.ReservedBitSet;
-
- var window_descriptor: ?u8 = null;
- if (!descriptor.single_segment_flag) {
- window_descriptor = try source.readByte();
- }
-
- var dictionary_id: ?u32 = null;
- if (descriptor.dictionary_id_flag > 0) {
- // if flag is 3 then field_size = 4, else field_size = flag
- const field_size = (@as(u4, 1) << descriptor.dictionary_id_flag) >> 1;
- dictionary_id = try source.readVarInt(u32, .little, field_size);
- }
-
- var content_size: ?u64 = null;
- if (descriptor.single_segment_flag or descriptor.content_size_flag > 0) {
- const field_size = @as(u4, 1) << descriptor.content_size_flag;
- content_size = try source.readVarInt(u64, .little, field_size);
- if (field_size == 2) content_size.? += 256;
- }
-
- const header = ZstandardHeader{
- .descriptor = descriptor,
- .window_descriptor = window_descriptor,
- .dictionary_id = dictionary_id,
- .content_size = content_size,
- };
- return header;
-}
-
-test {
- std.testing.refAllDecls(@This());
-}
diff --git a/lib/std/compress/zstandard/readers.zig b/lib/std/compress/zstandard/readers.zig
deleted file mode 100644
index 7b15784187..0000000000
--- a/lib/std/compress/zstandard/readers.zig
+++ /dev/null
@@ -1,82 +0,0 @@
-const std = @import("std");
-
-pub const ReversedByteReader = struct {
- remaining_bytes: usize,
- bytes: []const u8,
-
- const Reader = std.io.GenericReader(*ReversedByteReader, error{}, readFn);
-
- pub fn init(bytes: []const u8) ReversedByteReader {
- return .{
- .bytes = bytes,
- .remaining_bytes = bytes.len,
- };
- }
-
- pub fn reader(self: *ReversedByteReader) Reader {
- return .{ .context = self };
- }
-
- fn readFn(ctx: *ReversedByteReader, buffer: []u8) !usize {
- if (ctx.remaining_bytes == 0) return 0;
- const byte_index = ctx.remaining_bytes - 1;
- buffer[0] = ctx.bytes[byte_index];
- // buffer[0] = @bitReverse(ctx.bytes[byte_index]);
- ctx.remaining_bytes = byte_index;
- return 1;
- }
-};
-
-/// A bit reader for reading the reversed bit streams used to encode
-/// FSE compressed data.
-pub const ReverseBitReader = struct {
- byte_reader: ReversedByteReader,
- bit_reader: std.io.BitReader(.big, ReversedByteReader.Reader),
-
- pub fn init(self: *ReverseBitReader, bytes: []const u8) error{BitStreamHasNoStartBit}!void {
- self.byte_reader = ReversedByteReader.init(bytes);
- self.bit_reader = std.io.bitReader(.big, self.byte_reader.reader());
- if (bytes.len == 0) return;
- var i: usize = 0;
- while (i < 8 and 0 == self.readBitsNoEof(u1, 1) catch unreachable) : (i += 1) {}
- if (i == 8) return error.BitStreamHasNoStartBit;
- }
-
- pub fn readBitsNoEof(self: *@This(), comptime U: type, num_bits: u16) error{EndOfStream}!U {
- return self.bit_reader.readBitsNoEof(U, num_bits);
- }
-
- pub fn readBits(self: *@This(), comptime U: type, num_bits: u16, out_bits: *u16) error{}!U {
- return try self.bit_reader.readBits(U, num_bits, out_bits);
- }
-
- pub fn alignToByte(self: *@This()) void {
- self.bit_reader.alignToByte();
- }
-
- pub fn isEmpty(self: ReverseBitReader) bool {
- return self.byte_reader.remaining_bytes == 0 and self.bit_reader.count == 0;
- }
-};
-
-pub fn BitReader(comptime Reader: type) type {
- return struct {
- underlying: std.io.BitReader(.little, Reader),
-
- pub fn readBitsNoEof(self: *@This(), comptime U: type, num_bits: u16) !U {
- return self.underlying.readBitsNoEof(U, num_bits);
- }
-
- pub fn readBits(self: *@This(), comptime U: type, num_bits: u16, out_bits: *u16) !U {
- return self.underlying.readBits(U, num_bits, out_bits);
- }
-
- pub fn alignToByte(self: *@This()) void {
- self.underlying.alignToByte();
- }
- };
-}
-
-pub fn bitReader(reader: anytype) BitReader(@TypeOf(reader)) {
- return .{ .underlying = std.io.bitReader(.little, reader) };
-}
diff --git a/lib/std/compress/zstandard/types.zig b/lib/std/compress/zstandard/types.zig
deleted file mode 100644
index 41c3797d16..0000000000
--- a/lib/std/compress/zstandard/types.zig
+++ /dev/null
@@ -1,403 +0,0 @@
-pub const block_size_max = 1 << 17;
-
-pub const frame = struct {
- pub const Kind = enum { zstandard, skippable };
-
- pub const Zstandard = struct {
- pub const magic_number = 0xFD2FB528;
-
- header: Header,
- data_blocks: []Block,
- checksum: ?u32,
-
- pub const Header = struct {
- descriptor: Descriptor,
- window_descriptor: ?u8,
- dictionary_id: ?u32,
- content_size: ?u64,
-
- pub const Descriptor = packed struct {
- dictionary_id_flag: u2,
- content_checksum_flag: bool,
- reserved: bool,
- unused: bool,
- single_segment_flag: bool,
- content_size_flag: u2,
- };
- };
-
- pub const Block = struct {
- pub const Header = struct {
- last_block: bool,
- block_type: Block.Type,
- block_size: u21,
- };
-
- pub const Type = enum(u2) {
- raw,
- rle,
- compressed,
- reserved,
- };
- };
- };
-
- pub const Skippable = struct {
- pub const magic_number_min = 0x184D2A50;
- pub const magic_number_max = 0x184D2A5F;
-
- pub const Header = struct {
- magic_number: u32,
- frame_size: u32,
- };
- };
-};
-
-pub const compressed_block = struct {
- pub const LiteralsSection = struct {
- header: Header,
- huffman_tree: ?HuffmanTree,
- streams: Streams,
-
- pub const Streams = union(enum) {
- one: []const u8,
- four: [4][]const u8,
- };
-
- pub const Header = struct {
- block_type: BlockType,
- size_format: u2,
- regenerated_size: u20,
- compressed_size: ?u18,
- };
-
- pub const BlockType = enum(u2) {
- raw,
- rle,
- compressed,
- treeless,
- };
-
- pub const HuffmanTree = struct {
- max_bit_count: u4,
- symbol_count_minus_one: u8,
- nodes: [256]PrefixedSymbol,
-
- pub const PrefixedSymbol = struct {
- symbol: u8,
- prefix: u16,
- weight: u4,
- };
-
- pub const Result = union(enum) {
- symbol: u8,
- index: usize,
- };
-
- pub fn query(self: HuffmanTree, index: usize, prefix: u16) error{NotFound}!Result {
- var node = self.nodes[index];
- const weight = node.weight;
- var i: usize = index;
- while (node.weight == weight) {
- if (node.prefix == prefix) return Result{ .symbol = node.symbol };
- if (i == 0) return error.NotFound;
- i -= 1;
- node = self.nodes[i];
- }
- return Result{ .index = i };
- }
-
- pub fn weightToBitCount(weight: u4, max_bit_count: u4) u4 {
- return if (weight == 0) 0 else ((max_bit_count + 1) - weight);
- }
- };
-
- pub const StreamCount = enum { one, four };
- pub fn streamCount(size_format: u2, block_type: BlockType) StreamCount {
- return switch (block_type) {
- .raw, .rle => .one,
- .compressed, .treeless => if (size_format == 0) .one else .four,
- };
- }
- };
-
- pub const SequencesSection = struct {
- header: SequencesSection.Header,
- literals_length_table: Table,
- offset_table: Table,
- match_length_table: Table,
-
- pub const Header = struct {
- sequence_count: u24,
- match_lengths: Mode,
- offsets: Mode,
- literal_lengths: Mode,
-
- pub const Mode = enum(u2) {
- predefined,
- rle,
- fse,
- repeat,
- };
- };
- };
-
- pub const Table = union(enum) {
- fse: []const Fse,
- rle: u8,
-
- pub const Fse = struct {
- symbol: u8,
- baseline: u16,
- bits: u8,
- };
- };
-
- pub const literals_length_code_table = [36]struct { u32, u5 }{
- .{ 0, 0 }, .{ 1, 0 }, .{ 2, 0 }, .{ 3, 0 },
- .{ 4, 0 }, .{ 5, 0 }, .{ 6, 0 }, .{ 7, 0 },
- .{ 8, 0 }, .{ 9, 0 }, .{ 10, 0 }, .{ 11, 0 },
- .{ 12, 0 }, .{ 13, 0 }, .{ 14, 0 }, .{ 15, 0 },
- .{ 16, 1 }, .{ 18, 1 }, .{ 20, 1 }, .{ 22, 1 },
- .{ 24, 2 }, .{ 28, 2 }, .{ 32, 3 }, .{ 40, 3 },
- .{ 48, 4 }, .{ 64, 6 }, .{ 128, 7 }, .{ 256, 8 },
- .{ 512, 9 }, .{ 1024, 10 }, .{ 2048, 11 }, .{ 4096, 12 },
- .{ 8192, 13 }, .{ 16384, 14 }, .{ 32768, 15 }, .{ 65536, 16 },
- };
-
- pub const match_length_code_table = [53]struct { u32, u5 }{
- .{ 3, 0 }, .{ 4, 0 }, .{ 5, 0 }, .{ 6, 0 }, .{ 7, 0 }, .{ 8, 0 },
- .{ 9, 0 }, .{ 10, 0 }, .{ 11, 0 }, .{ 12, 0 }, .{ 13, 0 }, .{ 14, 0 },
- .{ 15, 0 }, .{ 16, 0 }, .{ 17, 0 }, .{ 18, 0 }, .{ 19, 0 }, .{ 20, 0 },
- .{ 21, 0 }, .{ 22, 0 }, .{ 23, 0 }, .{ 24, 0 }, .{ 25, 0 }, .{ 26, 0 },
- .{ 27, 0 }, .{ 28, 0 }, .{ 29, 0 }, .{ 30, 0 }, .{ 31, 0 }, .{ 32, 0 },
- .{ 33, 0 }, .{ 34, 0 }, .{ 35, 1 }, .{ 37, 1 }, .{ 39, 1 }, .{ 41, 1 },
- .{ 43, 2 }, .{ 47, 2 }, .{ 51, 3 }, .{ 59, 3 }, .{ 67, 4 }, .{ 83, 4 },
- .{ 99, 5 }, .{ 131, 7 }, .{ 259, 8 }, .{ 515, 9 }, .{ 1027, 10 }, .{ 2051, 11 },
- .{ 4099, 12 }, .{ 8195, 13 }, .{ 16387, 14 }, .{ 32771, 15 }, .{ 65539, 16 },
- };
-
- pub const literals_length_default_distribution = [36]i16{
- 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
- -1, -1, -1, -1,
- };
-
- pub const match_lengths_default_distribution = [53]i16{
- 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
- -1, -1, -1, -1, -1,
- };
-
- pub const offset_codes_default_distribution = [29]i16{
- 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1,
- };
-
- pub const predefined_literal_fse_table = Table{
- .fse = &[64]Table.Fse{
- .{ .symbol = 0, .bits = 4, .baseline = 0 },
- .{ .symbol = 0, .bits = 4, .baseline = 16 },
- .{ .symbol = 1, .bits = 5, .baseline = 32 },
- .{ .symbol = 3, .bits = 5, .baseline = 0 },
- .{ .symbol = 4, .bits = 5, .baseline = 0 },
- .{ .symbol = 6, .bits = 5, .baseline = 0 },
- .{ .symbol = 7, .bits = 5, .baseline = 0 },
- .{ .symbol = 9, .bits = 5, .baseline = 0 },
- .{ .symbol = 10, .bits = 5, .baseline = 0 },
- .{ .symbol = 12, .bits = 5, .baseline = 0 },
- .{ .symbol = 14, .bits = 6, .baseline = 0 },
- .{ .symbol = 16, .bits = 5, .baseline = 0 },
- .{ .symbol = 18, .bits = 5, .baseline = 0 },
- .{ .symbol = 19, .bits = 5, .baseline = 0 },
- .{ .symbol = 21, .bits = 5, .baseline = 0 },
- .{ .symbol = 22, .bits = 5, .baseline = 0 },
- .{ .symbol = 24, .bits = 5, .baseline = 0 },
- .{ .symbol = 25, .bits = 5, .baseline = 32 },
- .{ .symbol = 26, .bits = 5, .baseline = 0 },
- .{ .symbol = 27, .bits = 6, .baseline = 0 },
- .{ .symbol = 29, .bits = 6, .baseline = 0 },
- .{ .symbol = 31, .bits = 6, .baseline = 0 },
- .{ .symbol = 0, .bits = 4, .baseline = 32 },
- .{ .symbol = 1, .bits = 4, .baseline = 0 },
- .{ .symbol = 2, .bits = 5, .baseline = 0 },
- .{ .symbol = 4, .bits = 5, .baseline = 32 },
- .{ .symbol = 5, .bits = 5, .baseline = 0 },
- .{ .symbol = 7, .bits = 5, .baseline = 32 },
- .{ .symbol = 8, .bits = 5, .baseline = 0 },
- .{ .symbol = 10, .bits = 5, .baseline = 32 },
- .{ .symbol = 11, .bits = 5, .baseline = 0 },
- .{ .symbol = 13, .bits = 6, .baseline = 0 },
- .{ .symbol = 16, .bits = 5, .baseline = 32 },
- .{ .symbol = 17, .bits = 5, .baseline = 0 },
- .{ .symbol = 19, .bits = 5, .baseline = 32 },
- .{ .symbol = 20, .bits = 5, .baseline = 0 },
- .{ .symbol = 22, .bits = 5, .baseline = 32 },
- .{ .symbol = 23, .bits = 5, .baseline = 0 },
- .{ .symbol = 25, .bits = 4, .baseline = 0 },
- .{ .symbol = 25, .bits = 4, .baseline = 16 },
- .{ .symbol = 26, .bits = 5, .baseline = 32 },
- .{ .symbol = 28, .bits = 6, .baseline = 0 },
- .{ .symbol = 30, .bits = 6, .baseline = 0 },
- .{ .symbol = 0, .bits = 4, .baseline = 48 },
- .{ .symbol = 1, .bits = 4, .baseline = 16 },
- .{ .symbol = 2, .bits = 5, .baseline = 32 },
- .{ .symbol = 3, .bits = 5, .baseline = 32 },
- .{ .symbol = 5, .bits = 5, .baseline = 32 },
- .{ .symbol = 6, .bits = 5, .baseline = 32 },
- .{ .symbol = 8, .bits = 5, .baseline = 32 },
- .{ .symbol = 9, .bits = 5, .baseline = 32 },
- .{ .symbol = 11, .bits = 5, .baseline = 32 },
- .{ .symbol = 12, .bits = 5, .baseline = 32 },
- .{ .symbol = 15, .bits = 6, .baseline = 0 },
- .{ .symbol = 17, .bits = 5, .baseline = 32 },
- .{ .symbol = 18, .bits = 5, .baseline = 32 },
- .{ .symbol = 20, .bits = 5, .baseline = 32 },
- .{ .symbol = 21, .bits = 5, .baseline = 32 },
- .{ .symbol = 23, .bits = 5, .baseline = 32 },
- .{ .symbol = 24, .bits = 5, .baseline = 32 },
- .{ .symbol = 35, .bits = 6, .baseline = 0 },
- .{ .symbol = 34, .bits = 6, .baseline = 0 },
- .{ .symbol = 33, .bits = 6, .baseline = 0 },
- .{ .symbol = 32, .bits = 6, .baseline = 0 },
- },
- };
-
- pub const predefined_match_fse_table = Table{
- .fse = &[64]Table.Fse{
- .{ .symbol = 0, .bits = 6, .baseline = 0 },
- .{ .symbol = 1, .bits = 4, .baseline = 0 },
- .{ .symbol = 2, .bits = 5, .baseline = 32 },
- .{ .symbol = 3, .bits = 5, .baseline = 0 },
- .{ .symbol = 5, .bits = 5, .baseline = 0 },
- .{ .symbol = 6, .bits = 5, .baseline = 0 },
- .{ .symbol = 8, .bits = 5, .baseline = 0 },
- .{ .symbol = 10, .bits = 6, .baseline = 0 },
- .{ .symbol = 13, .bits = 6, .baseline = 0 },
- .{ .symbol = 16, .bits = 6, .baseline = 0 },
- .{ .symbol = 19, .bits = 6, .baseline = 0 },
- .{ .symbol = 22, .bits = 6, .baseline = 0 },
- .{ .symbol = 25, .bits = 6, .baseline = 0 },
- .{ .symbol = 28, .bits = 6, .baseline = 0 },
- .{ .symbol = 31, .bits = 6, .baseline = 0 },
- .{ .symbol = 33, .bits = 6, .baseline = 0 },
- .{ .symbol = 35, .bits = 6, .baseline = 0 },
- .{ .symbol = 37, .bits = 6, .baseline = 0 },
- .{ .symbol = 39, .bits = 6, .baseline = 0 },
- .{ .symbol = 41, .bits = 6, .baseline = 0 },
- .{ .symbol = 43, .bits = 6, .baseline = 0 },
- .{ .symbol = 45, .bits = 6, .baseline = 0 },
- .{ .symbol = 1, .bits = 4, .baseline = 16 },
- .{ .symbol = 2, .bits = 4, .baseline = 0 },
- .{ .symbol = 3, .bits = 5, .baseline = 32 },
- .{ .symbol = 4, .bits = 5, .baseline = 0 },
- .{ .symbol = 6, .bits = 5, .baseline = 32 },
- .{ .symbol = 7, .bits = 5, .baseline = 0 },
- .{ .symbol = 9, .bits = 6, .baseline = 0 },
- .{ .symbol = 12, .bits = 6, .baseline = 0 },
- .{ .symbol = 15, .bits = 6, .baseline = 0 },
- .{ .symbol = 18, .bits = 6, .baseline = 0 },
- .{ .symbol = 21, .bits = 6, .baseline = 0 },
- .{ .symbol = 24, .bits = 6, .baseline = 0 },
- .{ .symbol = 27, .bits = 6, .baseline = 0 },
- .{ .symbol = 30, .bits = 6, .baseline = 0 },
- .{ .symbol = 32, .bits = 6, .baseline = 0 },
- .{ .symbol = 34, .bits = 6, .baseline = 0 },
- .{ .symbol = 36, .bits = 6, .baseline = 0 },
- .{ .symbol = 38, .bits = 6, .baseline = 0 },
- .{ .symbol = 40, .bits = 6, .baseline = 0 },
- .{ .symbol = 42, .bits = 6, .baseline = 0 },
- .{ .symbol = 44, .bits = 6, .baseline = 0 },
- .{ .symbol = 1, .bits = 4, .baseline = 32 },
- .{ .symbol = 1, .bits = 4, .baseline = 48 },
- .{ .symbol = 2, .bits = 4, .baseline = 16 },
- .{ .symbol = 4, .bits = 5, .baseline = 32 },
- .{ .symbol = 5, .bits = 5, .baseline = 32 },
- .{ .symbol = 7, .bits = 5, .baseline = 32 },
- .{ .symbol = 8, .bits = 5, .baseline = 32 },
- .{ .symbol = 11, .bits = 6, .baseline = 0 },
- .{ .symbol = 14, .bits = 6, .baseline = 0 },
- .{ .symbol = 17, .bits = 6, .baseline = 0 },
- .{ .symbol = 20, .bits = 6, .baseline = 0 },
- .{ .symbol = 23, .bits = 6, .baseline = 0 },
- .{ .symbol = 26, .bits = 6, .baseline = 0 },
- .{ .symbol = 29, .bits = 6, .baseline = 0 },
- .{ .symbol = 52, .bits = 6, .baseline = 0 },
- .{ .symbol = 51, .bits = 6, .baseline = 0 },
- .{ .symbol = 50, .bits = 6, .baseline = 0 },
- .{ .symbol = 49, .bits = 6, .baseline = 0 },
- .{ .symbol = 48, .bits = 6, .baseline = 0 },
- .{ .symbol = 47, .bits = 6, .baseline = 0 },
- .{ .symbol = 46, .bits = 6, .baseline = 0 },
- },
- };
-
- pub const predefined_offset_fse_table = Table{
- .fse = &[32]Table.Fse{
- .{ .symbol = 0, .bits = 5, .baseline = 0 },
- .{ .symbol = 6, .bits = 4, .baseline = 0 },
- .{ .symbol = 9, .bits = 5, .baseline = 0 },
- .{ .symbol = 15, .bits = 5, .baseline = 0 },
- .{ .symbol = 21, .bits = 5, .baseline = 0 },
- .{ .symbol = 3, .bits = 5, .baseline = 0 },
- .{ .symbol = 7, .bits = 4, .baseline = 0 },
- .{ .symbol = 12, .bits = 5, .baseline = 0 },
- .{ .symbol = 18, .bits = 5, .baseline = 0 },
- .{ .symbol = 23, .bits = 5, .baseline = 0 },
- .{ .symbol = 5, .bits = 5, .baseline = 0 },
- .{ .symbol = 8, .bits = 4, .baseline = 0 },
- .{ .symbol = 14, .bits = 5, .baseline = 0 },
- .{ .symbol = 20, .bits = 5, .baseline = 0 },
- .{ .symbol = 2, .bits = 5, .baseline = 0 },
- .{ .symbol = 7, .bits = 4, .baseline = 16 },
- .{ .symbol = 11, .bits = 5, .baseline = 0 },
- .{ .symbol = 17, .bits = 5, .baseline = 0 },
- .{ .symbol = 22, .bits = 5, .baseline = 0 },
- .{ .symbol = 4, .bits = 5, .baseline = 0 },
- .{ .symbol = 8, .bits = 4, .baseline = 16 },
- .{ .symbol = 13, .bits = 5, .baseline = 0 },
- .{ .symbol = 19, .bits = 5, .baseline = 0 },
- .{ .symbol = 1, .bits = 5, .baseline = 0 },
- .{ .symbol = 6, .bits = 4, .baseline = 16 },
- .{ .symbol = 10, .bits = 5, .baseline = 0 },
- .{ .symbol = 16, .bits = 5, .baseline = 0 },
- .{ .symbol = 28, .bits = 5, .baseline = 0 },
- .{ .symbol = 27, .bits = 5, .baseline = 0 },
- .{ .symbol = 26, .bits = 5, .baseline = 0 },
- .{ .symbol = 25, .bits = 5, .baseline = 0 },
- .{ .symbol = 24, .bits = 5, .baseline = 0 },
- },
- };
- pub const start_repeated_offset_1 = 1;
- pub const start_repeated_offset_2 = 4;
- pub const start_repeated_offset_3 = 8;
-
- pub const table_accuracy_log_max = struct {
- pub const literal = 9;
- pub const match = 9;
- pub const offset = 8;
- };
-
- pub const table_symbol_count_max = struct {
- pub const literal = 36;
- pub const match = 53;
- pub const offset = 32;
- };
-
- pub const default_accuracy_log = struct {
- pub const literal = 6;
- pub const match = 6;
- pub const offset = 5;
- };
- pub const table_size_max = struct {
- pub const literal = 1 << table_accuracy_log_max.literal;
- pub const match = 1 << table_accuracy_log_max.match;
- pub const offset = 1 << table_accuracy_log_max.offset;
- };
-};
-
-test {
- const testing = @import("std").testing;
- testing.refAllDeclsRecursive(@This());
-}
diff --git a/lib/std/compress/zstd.zig b/lib/std/compress/zstd.zig
new file mode 100644
index 0000000000..0352a0e1f4
--- /dev/null
+++ b/lib/std/compress/zstd.zig
@@ -0,0 +1,152 @@
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+
+pub const Decompress = @import("zstd/Decompress.zig");
+
+/// Recommended amount by the standard. Lower than this may result in inability
+/// to decompress common streams.
+pub const default_window_len = 8 * 1024 * 1024;
+pub const block_size_max = 1 << 17;
+
+pub const literals_length_default_distribution = [36]i16{
+ 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1,
+};
+
+pub const match_lengths_default_distribution = [53]i16{
+ 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+ -1, -1, -1, -1, -1,
+};
+
+pub const offset_codes_default_distribution = [29]i16{
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1,
+};
+
+pub const start_repeated_offset_1 = 1;
+pub const start_repeated_offset_2 = 4;
+pub const start_repeated_offset_3 = 8;
+
+pub const literals_length_code_table = [36]struct { u32, u5 }{
+ .{ 0, 0 }, .{ 1, 0 }, .{ 2, 0 }, .{ 3, 0 },
+ .{ 4, 0 }, .{ 5, 0 }, .{ 6, 0 }, .{ 7, 0 },
+ .{ 8, 0 }, .{ 9, 0 }, .{ 10, 0 }, .{ 11, 0 },
+ .{ 12, 0 }, .{ 13, 0 }, .{ 14, 0 }, .{ 15, 0 },
+ .{ 16, 1 }, .{ 18, 1 }, .{ 20, 1 }, .{ 22, 1 },
+ .{ 24, 2 }, .{ 28, 2 }, .{ 32, 3 }, .{ 40, 3 },
+ .{ 48, 4 }, .{ 64, 6 }, .{ 128, 7 }, .{ 256, 8 },
+ .{ 512, 9 }, .{ 1024, 10 }, .{ 2048, 11 }, .{ 4096, 12 },
+ .{ 8192, 13 }, .{ 16384, 14 }, .{ 32768, 15 }, .{ 65536, 16 },
+};
+
+pub const match_length_code_table = [53]struct { u32, u5 }{
+ .{ 3, 0 }, .{ 4, 0 }, .{ 5, 0 }, .{ 6, 0 }, .{ 7, 0 }, .{ 8, 0 },
+ .{ 9, 0 }, .{ 10, 0 }, .{ 11, 0 }, .{ 12, 0 }, .{ 13, 0 }, .{ 14, 0 },
+ .{ 15, 0 }, .{ 16, 0 }, .{ 17, 0 }, .{ 18, 0 }, .{ 19, 0 }, .{ 20, 0 },
+ .{ 21, 0 }, .{ 22, 0 }, .{ 23, 0 }, .{ 24, 0 }, .{ 25, 0 }, .{ 26, 0 },
+ .{ 27, 0 }, .{ 28, 0 }, .{ 29, 0 }, .{ 30, 0 }, .{ 31, 0 }, .{ 32, 0 },
+ .{ 33, 0 }, .{ 34, 0 }, .{ 35, 1 }, .{ 37, 1 }, .{ 39, 1 }, .{ 41, 1 },
+ .{ 43, 2 }, .{ 47, 2 }, .{ 51, 3 }, .{ 59, 3 }, .{ 67, 4 }, .{ 83, 4 },
+ .{ 99, 5 }, .{ 131, 7 }, .{ 259, 8 }, .{ 515, 9 }, .{ 1027, 10 }, .{ 2051, 11 },
+ .{ 4099, 12 }, .{ 8195, 13 }, .{ 16387, 14 }, .{ 32771, 15 }, .{ 65539, 16 },
+};
+
+pub const table_accuracy_log_max = struct {
+ pub const literal = 9;
+ pub const match = 9;
+ pub const offset = 8;
+};
+
+pub const table_symbol_count_max = struct {
+ pub const literal = 36;
+ pub const match = 53;
+ pub const offset = 32;
+};
+
+pub const default_accuracy_log = struct {
+ pub const literal = 6;
+ pub const match = 6;
+ pub const offset = 5;
+};
+pub const table_size_max = struct {
+ pub const literal = 1 << table_accuracy_log_max.literal;
+ pub const match = 1 << table_accuracy_log_max.match;
+ pub const offset = 1 << table_accuracy_log_max.offset;
+};
+
+fn testDecompress(gpa: std.mem.Allocator, compressed: []const u8) ![]u8 {
+ var out: std.ArrayListUnmanaged(u8) = .empty;
+ defer out.deinit(gpa);
+ try out.ensureUnusedCapacity(gpa, default_window_len);
+
+ var in: std.io.Reader = .fixed(compressed);
+ var zstd_stream: Decompress = .init(&in, &.{}, .{});
+ try zstd_stream.reader.appendRemaining(gpa, null, &out, .unlimited);
+
+ return out.toOwnedSlice(gpa);
+}
+
+fn testExpectDecompress(uncompressed: []const u8, compressed: []const u8) !void {
+ const gpa = std.testing.allocator;
+ const result = try testDecompress(gpa, compressed);
+ defer gpa.free(result);
+ try std.testing.expectEqualSlices(u8, uncompressed, result);
+}
+
+fn testExpectDecompressError(err: anyerror, compressed: []const u8) !void {
+ const gpa = std.testing.allocator;
+
+ var out: std.ArrayListUnmanaged(u8) = .empty;
+ defer out.deinit(gpa);
+ try out.ensureUnusedCapacity(gpa, default_window_len);
+
+ var in: std.io.Reader = .fixed(compressed);
+ var zstd_stream: Decompress = .init(&in, &.{}, .{});
+ try std.testing.expectError(
+ error.ReadFailed,
+ zstd_stream.reader.appendRemaining(gpa, null, &out, .unlimited),
+ );
+ try std.testing.expectError(err, zstd_stream.err orelse {});
+}
+
+test Decompress {
+ const uncompressed = @embedFile("testdata/rfc8478.txt");
+ const compressed3 = @embedFile("testdata/rfc8478.txt.zst.3");
+ const compressed19 = @embedFile("testdata/rfc8478.txt.zst.19");
+
+ try testExpectDecompress(uncompressed, compressed3);
+ try testExpectDecompress(uncompressed, compressed19);
+}
+
+test "zero sized raw block" {
+ const input_raw =
+ "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
+ "\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
+ "\x01\x00\x00"; // block header with: last_block set, block_type raw, block_size zero
+ try testExpectDecompress("", input_raw);
+}
+
+test "zero sized rle block" {
+ const input_rle =
+ "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
+ "\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
+ "\x03\x00\x00" ++ // block header with: last_block set, block_type rle, block_size zero
+ "\xaa"; // block_content
+ try testExpectDecompress("", input_rle);
+}
+
+test "declared raw literals size too large" {
+ const input_raw =
+ "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
+ "\x00\x00" ++ // frame header: everything unset, window descriptor zero
+ "\x95\x00\x00" ++ // block header with: last_block set, block_type compressed, block_size 18
+ "\xbc\xf3\xae" ++ // literals section header with: type raw, size_format 3, regenerated_size 716603
+ "\xa5\x9f\xe3"; // some bytes of literal content - the content is shorter than regenerated_size
+
+ // Note that the regenerated_size in the above input is larger than block maximum size, so the
+ // block can't be valid as it is a raw literals block.
+ try testExpectDecompressError(error.MalformedLiteralsSection, input_raw);
+}
diff --git a/lib/std/compress/zstd/Decompress.zig b/lib/std/compress/zstd/Decompress.zig
new file mode 100644
index 0000000000..b13a2dcf7a
--- /dev/null
+++ b/lib/std/compress/zstd/Decompress.zig
@@ -0,0 +1,1840 @@
+const Decompress = @This();
+const std = @import("std");
+const assert = std.debug.assert;
+const Reader = std.io.Reader;
+const Limit = std.io.Limit;
+const zstd = @import("../zstd.zig");
+const Writer = std.io.Writer;
+
+input: *Reader,
+reader: Reader,
+state: State,
+verify_checksum: bool,
+window_len: u32,
+err: ?Error = null,
+
+const State = union(enum) {
+ new_frame,
+ in_frame: InFrame,
+ skipping_frame: usize,
+ end,
+
+ const InFrame = struct {
+ frame: Frame,
+ checksum: ?u32,
+ decompressed_size: usize,
+ decode: Frame.Zstandard.Decode,
+ };
+};
+
+pub const Options = struct {
+ /// Verifying checksums is not implemented yet and will cause a panic if
+ /// you set this to true.
+ verify_checksum: bool = false,
+
+ /// The output buffer is asserted to have capacity for `window_len` plus
+ /// `zstd.block_size_max`.
+ ///
+ /// If `window_len` is too small, then some streams will fail to decompress
+ /// with `error.OutputBufferUndersize`.
+ window_len: u32 = zstd.default_window_len,
+};
+
+pub const Error = error{
+ BadMagic,
+ BlockOversize,
+ ChecksumFailure,
+ ContentOversize,
+ DictionaryIdFlagUnsupported,
+ EndOfStream,
+ HuffmanTreeIncomplete,
+ InvalidBitStream,
+ MalformedAccuracyLog,
+ MalformedBlock,
+ MalformedCompressedBlock,
+ MalformedFrame,
+ MalformedFseBits,
+ MalformedFseTable,
+ MalformedHuffmanTree,
+ MalformedLiteralsHeader,
+ MalformedLiteralsLength,
+ MalformedLiteralsSection,
+ MalformedSequence,
+ MissingStartBit,
+ OutputBufferUndersize,
+ InputBufferUndersize,
+ ReadFailed,
+ RepeatModeFirst,
+ ReservedBitSet,
+ ReservedBlock,
+ SequenceBufferUndersize,
+ TreelessLiteralsFirst,
+ UnexpectedEndOfLiteralStream,
+ WindowOversize,
+ WindowSizeUnknown,
+};
+
+/// When connecting `reader` to a `Writer`, `buffer` should be empty, and
+/// `Writer.buffer` capacity has requirements based on `Options.window_len`.
+///
+/// Otherwise, `buffer` has those requirements.
+pub fn init(input: *Reader, buffer: []u8, options: Options) Decompress {
+ return .{
+ .input = input,
+ .state = .new_frame,
+ .verify_checksum = options.verify_checksum,
+ .window_len = options.window_len,
+ .reader = .{
+ .vtable = &.{
+ .stream = stream,
+ .rebase = rebase,
+ },
+ .buffer = buffer,
+ .seek = 0,
+ .end = 0,
+ },
+ };
+}
+
+fn rebase(r: *Reader, capacity: usize) Reader.RebaseError!void {
+ const d: *Decompress = @alignCast(@fieldParentPtr("reader", r));
+ assert(capacity <= r.buffer.len - d.window_len);
+ assert(r.end + capacity > r.buffer.len);
+ const buffered = r.buffer[0..r.end];
+ const discard = buffered.len - d.window_len;
+ const keep = buffered[discard..];
+ @memmove(r.buffer[0..keep.len], keep);
+ r.end = keep.len;
+ r.seek -= discard;
+}
+
+fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
+ const d: *Decompress = @alignCast(@fieldParentPtr("reader", r));
+ const in = d.input;
+
+ switch (d.state) {
+ .new_frame => {
+ // Allow error.EndOfStream only on the frame magic.
+ const magic = try in.takeEnumNonexhaustive(Frame.Magic, .little);
+ initFrame(d, w.buffer.len, magic) catch |err| {
+ d.err = err;
+ return error.ReadFailed;
+ };
+ return readInFrame(d, w, limit, &d.state.in_frame) catch |err| switch (err) {
+ error.ReadFailed => return error.ReadFailed,
+ error.WriteFailed => return error.WriteFailed,
+ else => |e| {
+ d.err = e;
+ return error.ReadFailed;
+ },
+ };
+ },
+ .in_frame => |*in_frame| {
+ return readInFrame(d, w, limit, in_frame) catch |err| switch (err) {
+ error.ReadFailed => return error.ReadFailed,
+ error.WriteFailed => return error.WriteFailed,
+ else => |e| {
+ d.err = e;
+ return error.ReadFailed;
+ },
+ };
+ },
+ .skipping_frame => |*remaining| {
+ const n = in.discard(.limited(remaining.*)) catch |err| {
+ d.err = err;
+ return error.ReadFailed;
+ };
+ remaining.* -= n;
+ if (remaining.* == 0) d.state = .new_frame;
+ return 0;
+ },
+ .end => return error.EndOfStream,
+ }
+}
+
+fn initFrame(d: *Decompress, window_size_max: usize, magic: Frame.Magic) !void {
+ const in = d.input;
+ switch (magic.kind() orelse return error.BadMagic) {
+ .zstandard => {
+ const header = try Frame.Zstandard.Header.decode(in);
+ d.state = .{ .in_frame = .{
+ .frame = try Frame.init(header, window_size_max, d.verify_checksum),
+ .checksum = null,
+ .decompressed_size = 0,
+ .decode = .init,
+ } };
+ },
+ .skippable => {
+ const frame_size = try in.takeInt(u32, .little);
+ d.state = .{ .skipping_frame = frame_size };
+ },
+ }
+}
+
+fn readInFrame(d: *Decompress, w: *Writer, limit: Limit, state: *State.InFrame) !usize {
+ const in = d.input;
+ const window_len = d.window_len;
+
+ const block_header = try in.takeStruct(Frame.Zstandard.Block.Header, .little);
+ const block_size = block_header.size;
+ const frame_block_size_max = state.frame.block_size_max;
+ if (frame_block_size_max < block_size) return error.BlockOversize;
+ if (@intFromEnum(limit) < block_size) return error.OutputBufferUndersize;
+ var bytes_written: usize = 0;
+ switch (block_header.type) {
+ .raw => {
+ try in.streamExactPreserve(w, window_len, block_size);
+ bytes_written = block_size;
+ },
+ .rle => {
+ const byte = try in.takeByte();
+ try w.splatBytePreserve(window_len, byte, block_size);
+ bytes_written = block_size;
+ },
+ .compressed => {
+ var literals_buffer: [zstd.block_size_max]u8 = undefined;
+ var sequence_buffer: [zstd.block_size_max]u8 = undefined;
+ var remaining: Limit = .limited(block_size);
+ const literals = try LiteralsSection.decode(in, &remaining, &literals_buffer);
+ const sequences_header = try SequencesSection.Header.decode(in, &remaining);
+
+ const decode = &state.decode;
+ try decode.prepare(in, &remaining, literals, sequences_header);
+
+ {
+ if (sequence_buffer.len < @intFromEnum(remaining))
+ return error.SequenceBufferUndersize;
+ const seq_slice = remaining.slice(&sequence_buffer);
+ try in.readSliceAll(seq_slice);
+ var bit_stream = try ReverseBitReader.init(seq_slice);
+
+ if (sequences_header.sequence_count > 0) {
+ try decode.readInitialFseState(&bit_stream);
+
+ // Ensures the following calls to `decodeSequence` will not flush.
+ if (window_len + frame_block_size_max > w.buffer.len) return error.OutputBufferUndersize;
+ const dest = (try w.writableSliceGreedyPreserve(window_len, frame_block_size_max))[0..frame_block_size_max];
+ const write_pos = dest.ptr - w.buffer.ptr;
+ for (0..sequences_header.sequence_count - 1) |_| {
+ bytes_written += try decode.decodeSequence(w.buffer, write_pos + bytes_written, &bit_stream);
+ try decode.updateState(.literal, &bit_stream);
+ try decode.updateState(.match, &bit_stream);
+ try decode.updateState(.offset, &bit_stream);
+ }
+ bytes_written += try decode.decodeSequence(w.buffer, write_pos + bytes_written, &bit_stream);
+ if (bytes_written > dest.len) return error.MalformedSequence;
+ w.advance(bytes_written);
+ }
+
+ if (!bit_stream.isEmpty()) {
+ return error.MalformedCompressedBlock;
+ }
+ }
+
+ if (decode.literal_written_count < literals.header.regenerated_size) {
+ const len = literals.header.regenerated_size - decode.literal_written_count;
+ try decode.decodeLiterals(w, len);
+ decode.literal_written_count += len;
+ bytes_written += len;
+ }
+
+ switch (decode.literal_header.block_type) {
+ .treeless, .compressed => {
+ if (!decode.isLiteralStreamEmpty()) return error.MalformedCompressedBlock;
+ },
+ .raw, .rle => {},
+ }
+
+ if (bytes_written > frame_block_size_max) return error.BlockOversize;
+ },
+ .reserved => return error.ReservedBlock,
+ }
+
+ if (state.frame.hasher_opt) |*hasher| {
+ if (bytes_written > 0) {
+ _ = hasher;
+ @panic("TODO all those bytes written needed to go through the hasher too");
+ }
+ }
+
+ state.decompressed_size += bytes_written;
+
+ if (block_header.last) {
+ if (state.frame.has_checksum) {
+ const expected_checksum = try in.takeInt(u32, .little);
+ if (state.frame.hasher_opt) |*hasher| {
+ const actual_checksum: u32 = @truncate(hasher.final());
+ if (expected_checksum != actual_checksum) return error.ChecksumFailure;
+ }
+ }
+ if (state.frame.content_size) |content_size| {
+ if (content_size != state.decompressed_size) {
+ return error.MalformedFrame;
+ }
+ }
+ d.state = .new_frame;
+ } else if (state.frame.content_size) |content_size| {
+ if (state.decompressed_size > content_size) return error.MalformedFrame;
+ }
+
+ return bytes_written;
+}
+
+pub const Frame = struct {
+ hasher_opt: ?std.hash.XxHash64,
+ window_size: usize,
+ has_checksum: bool,
+ block_size_max: usize,
+ content_size: ?usize,
+
+ pub const Magic = enum(u32) {
+ zstandard = 0xFD2FB528,
+ _,
+
+ pub fn kind(m: Magic) ?Kind {
+ return switch (@intFromEnum(m)) {
+ @intFromEnum(Magic.zstandard) => .zstandard,
+ @intFromEnum(Skippable.magic_min)...@intFromEnum(Skippable.magic_max) => .skippable,
+ else => null,
+ };
+ }
+
+ pub fn isSkippable(m: Magic) bool {
+ return switch (@intFromEnum(m)) {
+ @intFromEnum(Skippable.magic_min)...@intFromEnum(Skippable.magic_max) => true,
+ else => false,
+ };
+ }
+ };
+
+ pub const Kind = enum { zstandard, skippable };
+
+ pub const Zstandard = struct {
+ pub const magic: Magic = .zstandard;
+
+ header: Header,
+ data_blocks: []Block,
+ checksum: ?u32,
+
+ pub const Header = struct {
+ descriptor: Descriptor,
+ window_descriptor: ?u8,
+ dictionary_id: ?u32,
+ content_size: ?u64,
+
+ pub const Descriptor = packed struct {
+ dictionary_id_flag: u2,
+ content_checksum_flag: bool,
+ reserved: bool,
+ unused: bool,
+ single_segment_flag: bool,
+ content_size_flag: u2,
+ };
+
+ pub const DecodeError = Reader.Error || error{ReservedBitSet};
+
+ pub fn decode(in: *Reader) DecodeError!Header {
+ const descriptor: Descriptor = @bitCast(try in.takeByte());
+
+ if (descriptor.reserved) return error.ReservedBitSet;
+
+ const window_descriptor: ?u8 = if (descriptor.single_segment_flag) null else try in.takeByte();
+
+ const dictionary_id: ?u32 = if (descriptor.dictionary_id_flag > 0) d: {
+ // if flag is 3 then field_size = 4, else field_size = flag
+ const field_size = (@as(u4, 1) << descriptor.dictionary_id_flag) >> 1;
+ break :d try in.takeVarInt(u32, .little, field_size);
+ } else null;
+
+ const content_size: ?u64 = if (descriptor.single_segment_flag or descriptor.content_size_flag > 0) c: {
+ const field_size = @as(u4, 1) << descriptor.content_size_flag;
+ const content_size = try in.takeVarInt(u64, .little, field_size);
+ break :c if (field_size == 2) content_size + 256 else content_size;
+ } else null;
+
+ return .{
+ .descriptor = descriptor,
+ .window_descriptor = window_descriptor,
+ .dictionary_id = dictionary_id,
+ .content_size = content_size,
+ };
+ }
+
+ /// Returns the window size required to decompress a frame, or `null` if it
+ /// cannot be determined (which indicates a malformed frame header).
+ pub fn windowSize(header: Header) ?u64 {
+ if (header.window_descriptor) |descriptor| {
+ const exponent = (descriptor & 0b11111000) >> 3;
+ const mantissa = descriptor & 0b00000111;
+ const window_log = 10 + exponent;
+ const window_base = @as(u64, 1) << @as(u6, @intCast(window_log));
+ const window_add = (window_base / 8) * mantissa;
+ return window_base + window_add;
+ } else return header.content_size;
+ }
+ };
+
+ pub const Block = struct {
+ pub const Header = packed struct(u24) {
+ last: bool,
+ type: Type,
+ size: u21,
+ };
+
+ pub const Type = enum(u2) {
+ raw,
+ rle,
+ compressed,
+ reserved,
+ };
+ };
+
+ pub const Decode = struct {
+ repeat_offsets: [3]u32,
+
+ offset: StateData(8),
+ match: StateData(9),
+ literal: StateData(9),
+
+ literal_fse_buffer: [zstd.table_size_max.literal]Table.Fse,
+ match_fse_buffer: [zstd.table_size_max.match]Table.Fse,
+ offset_fse_buffer: [zstd.table_size_max.offset]Table.Fse,
+
+ fse_tables_undefined: bool,
+
+ literal_stream_reader: ReverseBitReader,
+ literal_stream_index: usize,
+ literal_streams: LiteralsSection.Streams,
+ literal_header: LiteralsSection.Header,
+ huffman_tree: ?LiteralsSection.HuffmanTree,
+
+ literal_written_count: usize,
+
+ fn StateData(comptime max_accuracy_log: comptime_int) type {
+ return struct {
+ state: @This().State,
+ table: Table,
+ accuracy_log: u8,
+
+ const State = std.meta.Int(.unsigned, max_accuracy_log);
+ };
+ }
+
+ const init: Decode = .{
+ .repeat_offsets = .{
+ zstd.start_repeated_offset_1,
+ zstd.start_repeated_offset_2,
+ zstd.start_repeated_offset_3,
+ },
+
+ .offset = undefined,
+ .match = undefined,
+ .literal = undefined,
+
+ .literal_fse_buffer = undefined,
+ .match_fse_buffer = undefined,
+ .offset_fse_buffer = undefined,
+
+ .fse_tables_undefined = true,
+
+ .literal_written_count = 0,
+ .literal_header = undefined,
+ .literal_streams = undefined,
+ .literal_stream_reader = undefined,
+ .literal_stream_index = undefined,
+ .huffman_tree = null,
+ };
+
+ pub const PrepareError = error{
+ /// the (reversed) literal bitstream's first byte does not have any bits set
+ MissingStartBit,
+ /// `literals` is a treeless literals section and the decode state does not
+ /// have a Huffman tree from a previous block
+ TreelessLiteralsFirst,
+ /// on the first call if one of the sequence FSE tables is set to repeat mode
+ RepeatModeFirst,
+ /// an FSE table has an invalid accuracy
+ MalformedAccuracyLog,
+ /// failed decoding an FSE table
+ MalformedFseTable,
+ /// input stream ends before all FSE tables are read
+ EndOfStream,
+ ReadFailed,
+ InputBufferUndersize,
+ };
+
+ /// Prepare the decoder to decode a compressed block. Loads the
+ /// literals stream and Huffman tree from `literals` and reads the
+ /// FSE tables from `in`.
+ pub fn prepare(
+ self: *Decode,
+ in: *Reader,
+ remaining: *Limit,
+ literals: LiteralsSection,
+ sequences_header: SequencesSection.Header,
+ ) PrepareError!void {
+ self.literal_written_count = 0;
+ self.literal_header = literals.header;
+ self.literal_streams = literals.streams;
+
+ if (literals.huffman_tree) |tree| {
+ self.huffman_tree = tree;
+ } else if (literals.header.block_type == .treeless and self.huffman_tree == null) {
+ return error.TreelessLiteralsFirst;
+ }
+
+ switch (literals.header.block_type) {
+ .raw, .rle => {},
+ .compressed, .treeless => {
+ self.literal_stream_index = 0;
+ switch (literals.streams) {
+ .one => |slice| try self.initLiteralStream(slice),
+ .four => |streams| try self.initLiteralStream(streams[0]),
+ }
+ },
+ }
+
+ if (sequences_header.sequence_count > 0) {
+ try self.updateFseTable(in, remaining, .literal, sequences_header.literal_lengths);
+ try self.updateFseTable(in, remaining, .offset, sequences_header.offsets);
+ try self.updateFseTable(in, remaining, .match, sequences_header.match_lengths);
+ self.fse_tables_undefined = false;
+ }
+ }
+
+ /// Read initial FSE states for sequence decoding.
+ pub fn readInitialFseState(self: *Decode, bit_reader: *ReverseBitReader) error{EndOfStream}!void {
+ self.literal.state = try bit_reader.readBitsNoEof(u9, self.literal.accuracy_log);
+ self.offset.state = try bit_reader.readBitsNoEof(u8, self.offset.accuracy_log);
+ self.match.state = try bit_reader.readBitsNoEof(u9, self.match.accuracy_log);
+ }
+
+ fn updateRepeatOffset(self: *Decode, offset: u32) void {
+ self.repeat_offsets[2] = self.repeat_offsets[1];
+ self.repeat_offsets[1] = self.repeat_offsets[0];
+ self.repeat_offsets[0] = offset;
+ }
+
+ fn useRepeatOffset(self: *Decode, index: usize) u32 {
+ if (index == 1)
+ std.mem.swap(u32, &self.repeat_offsets[0], &self.repeat_offsets[1])
+ else if (index == 2) {
+ std.mem.swap(u32, &self.repeat_offsets[0], &self.repeat_offsets[2]);
+ std.mem.swap(u32, &self.repeat_offsets[1], &self.repeat_offsets[2]);
+ }
+ return self.repeat_offsets[0];
+ }
+
+ const WhichFse = enum { offset, match, literal };
+
+ /// TODO: don't use `@field`
+ fn updateState(
+ self: *Decode,
+ comptime choice: WhichFse,
+ bit_reader: *ReverseBitReader,
+ ) error{ MalformedFseBits, EndOfStream }!void {
+ switch (@field(self, @tagName(choice)).table) {
+ .rle => {},
+ .fse => |table| {
+ const data = table[@field(self, @tagName(choice)).state];
+ const T = @TypeOf(@field(self, @tagName(choice))).State;
+ const bits_summand = try bit_reader.readBitsNoEof(T, data.bits);
+ const next_state = std.math.cast(
+ @TypeOf(@field(self, @tagName(choice))).State,
+ data.baseline + bits_summand,
+ ) orelse return error.MalformedFseBits;
+ @field(self, @tagName(choice)).state = next_state;
+ },
+ }
+ }
+
+ const FseTableError = error{
+ MalformedFseTable,
+ MalformedAccuracyLog,
+ RepeatModeFirst,
+ EndOfStream,
+ };
+
+ /// TODO: don't use `@field`
+ fn updateFseTable(
+ self: *Decode,
+ in: *Reader,
+ remaining: *Limit,
+ comptime choice: WhichFse,
+ mode: SequencesSection.Header.Mode,
+ ) !void {
+ const field_name = @tagName(choice);
+ switch (mode) {
+ .predefined => {
+ @field(self, field_name).accuracy_log =
+ @field(zstd.default_accuracy_log, field_name);
+
+ @field(self, field_name).table =
+ @field(Table, "predefined_" ++ field_name);
+ },
+ .rle => {
+ @field(self, field_name).accuracy_log = 0;
+ remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+ @field(self, field_name).table = .{ .rle = try in.takeByte() };
+ },
+ .fse => {
+ const max_table_size = 2048;
+ const peek_len: usize = remaining.minInt(max_table_size);
+ if (in.buffer.len < peek_len) return error.InputBufferUndersize;
+ const limited_buffer = try in.peek(peek_len);
+ var bit_reader: BitReader = .{ .bytes = limited_buffer };
+ const table_size = try Table.decode(
+ &bit_reader,
+ @field(zstd.table_symbol_count_max, field_name),
+ @field(zstd.table_accuracy_log_max, field_name),
+ &@field(self, field_name ++ "_fse_buffer"),
+ );
+ @field(self, field_name).table = .{
+ .fse = (&@field(self, field_name ++ "_fse_buffer"))[0..table_size],
+ };
+ @field(self, field_name).accuracy_log = std.math.log2_int_ceil(usize, table_size);
+ in.toss(bit_reader.index);
+ remaining.* = remaining.subtract(bit_reader.index).?;
+ },
+ .repeat => if (self.fse_tables_undefined) return error.RepeatModeFirst,
+ }
+ }
+
+ const Sequence = struct {
+ literal_length: u32,
+ match_length: u32,
+ offset: u32,
+ };
+
+ fn nextSequence(
+ self: *Decode,
+ bit_reader: *ReverseBitReader,
+ ) error{ InvalidBitStream, EndOfStream }!Sequence {
+ const raw_code = self.getCode(.offset);
+ const offset_code = std.math.cast(u5, raw_code) orelse {
+ return error.InvalidBitStream;
+ };
+ const offset_value = (@as(u32, 1) << offset_code) + try bit_reader.readBitsNoEof(u32, offset_code);
+
+ const match_code = self.getCode(.match);
+ if (match_code >= zstd.match_length_code_table.len)
+ return error.InvalidBitStream;
+ const match = zstd.match_length_code_table[match_code];
+ const match_length = match[0] + try bit_reader.readBitsNoEof(u32, match[1]);
+
+ const literal_code = self.getCode(.literal);
+ if (literal_code >= zstd.literals_length_code_table.len)
+ return error.InvalidBitStream;
+ const literal = zstd.literals_length_code_table[literal_code];
+ const literal_length = literal[0] + try bit_reader.readBitsNoEof(u32, literal[1]);
+
+ const offset = if (offset_value > 3) offset: {
+ const offset = offset_value - 3;
+ self.updateRepeatOffset(offset);
+ break :offset offset;
+ } else offset: {
+ if (literal_length == 0) {
+ if (offset_value == 3) {
+ const offset = self.repeat_offsets[0] - 1;
+ self.updateRepeatOffset(offset);
+ break :offset offset;
+ }
+ break :offset self.useRepeatOffset(offset_value);
+ }
+ break :offset self.useRepeatOffset(offset_value - 1);
+ };
+
+ if (offset == 0) return error.InvalidBitStream;
+
+ return .{
+ .literal_length = literal_length,
+ .match_length = match_length,
+ .offset = offset,
+ };
+ }
+
+ /// Decode one sequence from `bit_reader` into `dest`. Updates FSE states
+ /// if `last_sequence` is `false`. Assumes `prepare` called for the block
+ /// before attempting to decode sequences.
+ fn decodeSequence(
+ decode: *Decode,
+ dest: []u8,
+ write_pos: usize,
+ bit_reader: *ReverseBitReader,
+ ) !usize {
+ const sequence = try decode.nextSequence(bit_reader);
+ const literal_length: usize = sequence.literal_length;
+ const match_length: usize = sequence.match_length;
+ const sequence_length = literal_length + match_length;
+
+ const copy_start = std.math.sub(usize, write_pos + sequence.literal_length, sequence.offset) catch
+ return error.MalformedSequence;
+
+ if (decode.literal_written_count + literal_length > decode.literal_header.regenerated_size)
+ return error.MalformedLiteralsLength;
+ var sub_bw: Writer = .fixed(dest[write_pos..]);
+ try decodeLiterals(decode, &sub_bw, literal_length);
+ decode.literal_written_count += literal_length;
+ // This is not a @memmove; it intentionally repeats patterns
+ // caused by iterating one byte at a time.
+ for (
+ dest[write_pos + literal_length ..][0..match_length],
+ dest[copy_start..][0..match_length],
+ ) |*d, s| d.* = s;
+ return sequence_length;
+ }
+
+ fn nextLiteralMultiStream(self: *Decode) error{MissingStartBit}!void {
+ self.literal_stream_index += 1;
+ try self.initLiteralStream(self.literal_streams.four[self.literal_stream_index]);
+ }
+
+ fn initLiteralStream(self: *Decode, bytes: []const u8) error{MissingStartBit}!void {
+ self.literal_stream_reader = try ReverseBitReader.init(bytes);
+ }
+
+ fn isLiteralStreamEmpty(self: *Decode) bool {
+ switch (self.literal_streams) {
+ .one => return self.literal_stream_reader.isEmpty(),
+ .four => return self.literal_stream_index == 3 and self.literal_stream_reader.isEmpty(),
+ }
+ }
+
+ const LiteralBitsError = error{
+ MissingStartBit,
+ UnexpectedEndOfLiteralStream,
+ };
+ fn readLiteralsBits(
+ self: *Decode,
+ bit_count_to_read: u16,
+ ) LiteralBitsError!u16 {
+ return self.literal_stream_reader.readBitsNoEof(u16, bit_count_to_read) catch bits: {
+ if (self.literal_streams == .four and self.literal_stream_index < 3) {
+ try self.nextLiteralMultiStream();
+ break :bits self.literal_stream_reader.readBitsNoEof(u16, bit_count_to_read) catch
+ return error.UnexpectedEndOfLiteralStream;
+ } else {
+ return error.UnexpectedEndOfLiteralStream;
+ }
+ };
+ }
+
+ /// Decode `len` bytes of literals into `w`.
+ fn decodeLiterals(d: *Decode, w: *Writer, len: usize) !void {
+ switch (d.literal_header.block_type) {
+ .raw => {
+ try w.writeAll(d.literal_streams.one[d.literal_written_count..][0..len]);
+ },
+ .rle => {
+ try w.splatByteAll(d.literal_streams.one[0], len);
+ },
+ .compressed, .treeless => {
+ if (len > w.buffer.len) return error.OutputBufferUndersize;
+ const buf = try w.writableSlice(len);
+ const huffman_tree = d.huffman_tree.?;
+ const max_bit_count = huffman_tree.max_bit_count;
+ const starting_bit_count = LiteralsSection.HuffmanTree.weightToBitCount(
+ huffman_tree.nodes[huffman_tree.symbol_count_minus_one].weight,
+ max_bit_count,
+ );
+ var bits_read: u4 = 0;
+ var huffman_tree_index: usize = huffman_tree.symbol_count_minus_one;
+ var bit_count_to_read: u4 = starting_bit_count;
+ for (buf) |*out| {
+ var prefix: u16 = 0;
+ while (true) {
+ const new_bits = try d.readLiteralsBits(bit_count_to_read);
+ prefix <<= bit_count_to_read;
+ prefix |= new_bits;
+ bits_read += bit_count_to_read;
+ const result = try huffman_tree.query(huffman_tree_index, prefix);
+
+ switch (result) {
+ .symbol => |sym| {
+ out.* = sym;
+ bit_count_to_read = starting_bit_count;
+ bits_read = 0;
+ huffman_tree_index = huffman_tree.symbol_count_minus_one;
+ break;
+ },
+ .index => |index| {
+ huffman_tree_index = index;
+ const bit_count = LiteralsSection.HuffmanTree.weightToBitCount(
+ huffman_tree.nodes[index].weight,
+ max_bit_count,
+ );
+ bit_count_to_read = bit_count - bits_read;
+ },
+ }
+ }
+ }
+ },
+ }
+ }
+
+ /// TODO: don't use `@field`
+ fn getCode(self: *Decode, comptime choice: WhichFse) u32 {
+ return switch (@field(self, @tagName(choice)).table) {
+ .rle => |value| value,
+ .fse => |table| table[@field(self, @tagName(choice)).state].symbol,
+ };
+ }
+ };
+ };
+
+ pub const Skippable = struct {
+ pub const magic_min: Magic = @enumFromInt(0x184D2A50);
+ pub const magic_max: Magic = @enumFromInt(0x184D2A5F);
+
+ pub const Header = struct {
+ magic_number: u32,
+ frame_size: u32,
+ };
+ };
+
+ const InitError = error{
+ /// Frame uses a dictionary.
+ DictionaryIdFlagUnsupported,
+ /// Frame does not have a valid window size.
+ WindowSizeUnknown,
+ /// Window size exceeds `window_size_max` or max `usize` value.
+ WindowOversize,
+ /// Frame header indicates a content size exceeding max `usize` value.
+ ContentOversize,
+ };
+
+ /// Validates `frame_header` and returns the associated `Frame`.
+ pub fn init(
+ frame_header: Frame.Zstandard.Header,
+ window_size_max: usize,
+ verify_checksum: bool,
+ ) InitError!Frame {
+ if (frame_header.descriptor.dictionary_id_flag != 0)
+ return error.DictionaryIdFlagUnsupported;
+
+ const window_size_raw = frame_header.windowSize() orelse return error.WindowSizeUnknown;
+ const window_size = if (window_size_raw > window_size_max)
+ return error.WindowOversize
+ else
+ std.math.cast(usize, window_size_raw) orelse return error.WindowOversize;
+
+ const should_compute_checksum =
+ frame_header.descriptor.content_checksum_flag and verify_checksum;
+
+ const content_size = if (frame_header.content_size) |size|
+ std.math.cast(usize, size) orelse return error.ContentOversize
+ else
+ null;
+
+ return .{
+ .hasher_opt = if (should_compute_checksum) std.hash.XxHash64.init(0) else null,
+ .window_size = window_size,
+ .has_checksum = frame_header.descriptor.content_checksum_flag,
+ .block_size_max = @min(zstd.block_size_max, window_size),
+ .content_size = content_size,
+ };
+ }
+};
+
+/// A decoded literals section of a zstandard block: its header, the Huffman
+/// tree used for compressed literals (if any), and the literal stream(s).
+pub const LiteralsSection = struct {
+    header: Header,
+    /// Present for `.compressed` blocks; `.treeless` blocks reuse a previously
+    /// decoded tree and `.raw`/`.rle` blocks use none.
+    huffman_tree: ?HuffmanTree,
+    streams: Streams,
+
+    pub const Streams = union(enum) {
+        one: []const u8,
+        four: [4][]const u8,
+
+        /// Split `stream_data` into one stream (`size_format == 0`) or four
+        /// streams described by a 6-byte jump table of three u16 lengths; the
+        /// fourth stream takes whatever remains.
+        fn decode(size_format: u2, stream_data: []const u8) !Streams {
+            if (size_format == 0) {
+                return .{ .one = stream_data };
+            }
+
+            // Four-stream layout requires at least the 6-byte jump table.
+            if (stream_data.len < 6) return error.MalformedLiteralsSection;
+
+            const stream_1_length: usize = std.mem.readInt(u16, stream_data[0..2], .little);
+            const stream_2_length: usize = std.mem.readInt(u16, stream_data[2..4], .little);
+            const stream_3_length: usize = std.mem.readInt(u16, stream_data[4..6], .little);
+
+            const stream_1_start = 6;
+            const stream_2_start = stream_1_start + stream_1_length;
+            const stream_3_start = stream_2_start + stream_2_length;
+            const stream_4_start = stream_3_start + stream_3_length;
+
+            if (stream_data.len < stream_4_start) return error.MalformedLiteralsSection;
+
+            return .{ .four = .{
+                stream_data[stream_1_start .. stream_1_start + stream_1_length],
+                stream_data[stream_2_start .. stream_2_start + stream_2_length],
+                stream_data[stream_3_start .. stream_3_start + stream_3_length],
+                stream_data[stream_4_start..],
+            } };
+        }
+    };
+
+    pub const Header = struct {
+        block_type: BlockType,
+        size_format: u2,
+        /// Number of decompressed literal bytes this section regenerates.
+        regenerated_size: u20,
+        /// Total compressed size (Huffman tree + streams); null for
+        /// `.raw`/`.rle` blocks, which do not encode one.
+        compressed_size: ?u18,
+
+        /// Decode a literals section header.
+        pub fn decode(in: *Reader, remaining: *Limit) !Header {
+            remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+            const byte0 = try in.takeByte();
+            // Low two bits: block type; next two bits: size format.
+            const block_type: BlockType = @enumFromInt(byte0 & 0b11);
+            const size_format: u2 = @intCast((byte0 & 0b1100) >> 2);
+            var regenerated_size: u20 = undefined;
+            var compressed_size: ?u18 = null;
+            switch (block_type) {
+                .raw, .rle => {
+                    // Only a regenerated size is stored: 5, 12, or 20 bits.
+                    switch (size_format) {
+                        0, 2 => {
+                            regenerated_size = byte0 >> 3;
+                        },
+                        1 => {
+                            remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+                            regenerated_size = (byte0 >> 4) + (@as(u20, try in.takeByte()) << 4);
+                        },
+                        3 => {
+                            remaining.* = remaining.subtract(2) orelse return error.EndOfStream;
+                            regenerated_size = (byte0 >> 4) +
+                                (@as(u20, try in.takeByte()) << 4) +
+                                (@as(u20, try in.takeByte()) << 12);
+                        },
+                    }
+                },
+                .compressed, .treeless => {
+                    // Regenerated and compressed sizes are packed together in
+                    // 3, 4, or 5 header bytes depending on the size format.
+                    remaining.* = remaining.subtract(2) orelse return error.EndOfStream;
+                    const byte1 = try in.takeByte();
+                    const byte2 = try in.takeByte();
+                    switch (size_format) {
+                        0, 1 => {
+                            regenerated_size = (byte0 >> 4) + ((@as(u20, byte1) & 0b00111111) << 4);
+                            compressed_size = ((byte1 & 0b11000000) >> 6) + (@as(u18, byte2) << 2);
+                        },
+                        2 => {
+                            remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+                            const byte3 = try in.takeByte();
+                            regenerated_size = (byte0 >> 4) + (@as(u20, byte1) << 4) + ((@as(u20, byte2) & 0b00000011) << 12);
+                            compressed_size = ((byte2 & 0b11111100) >> 2) + (@as(u18, byte3) << 6);
+                        },
+                        3 => {
+                            remaining.* = remaining.subtract(2) orelse return error.EndOfStream;
+                            const byte3 = try in.takeByte();
+                            const byte4 = try in.takeByte();
+                            regenerated_size = (byte0 >> 4) + (@as(u20, byte1) << 4) + ((@as(u20, byte2) & 0b00111111) << 12);
+                            compressed_size = ((byte2 & 0b11000000) >> 6) + (@as(u18, byte3) << 2) + (@as(u18, byte4) << 10);
+                        },
+                    }
+                },
+            }
+            return .{
+                .block_type = block_type,
+                .size_format = size_format,
+                .regenerated_size = regenerated_size,
+                .compressed_size = compressed_size,
+            };
+        }
+    };
+
+    pub const BlockType = enum(u2) {
+        raw,
+        rle,
+        compressed,
+        treeless,
+    };
+
+    pub const HuffmanTree = struct {
+        max_bit_count: u4,
+        symbol_count_minus_one: u8,
+        /// Symbols sorted by weight; only the first `symbol_count_minus_one + 1`
+        /// entries are meaningful.
+        nodes: [256]PrefixedSymbol,
+
+        pub const PrefixedSymbol = struct {
+            symbol: u8,
+            prefix: u16,
+            weight: u4,
+        };
+
+        pub const Result = union(enum) {
+            symbol: u8,
+            index: usize,
+        };
+
+        /// Look up `prefix` among the nodes sharing `nodes[index]`'s weight,
+        /// scanning backwards. Returns the matched symbol, or the index of the
+        /// first node with a different weight to continue the search at.
+        pub fn query(self: HuffmanTree, index: usize, prefix: u16) error{HuffmanTreeIncomplete}!Result {
+            var node = self.nodes[index];
+            const weight = node.weight;
+            var i: usize = index;
+            while (node.weight == weight) {
+                if (node.prefix == prefix) return .{ .symbol = node.symbol };
+                if (i == 0) return error.HuffmanTreeIncomplete;
+                i -= 1;
+                node = self.nodes[i];
+            }
+            return .{ .index = i };
+        }
+
+        /// A weight of 0 means the symbol is absent; otherwise higher weight
+        /// means a shorter code.
+        pub fn weightToBitCount(weight: u4, max_bit_count: u4) u4 {
+            return if (weight == 0) 0 else ((max_bit_count + 1) - weight);
+        }
+
+        pub const DecodeError = Reader.Error || error{
+            MalformedHuffmanTree,
+            MalformedFseTable,
+            MalformedAccuracyLog,
+            EndOfStream,
+            MissingStartBit,
+        };
+
+        /// Decode a Huffman tree description. A header byte below 128 gives
+        /// the byte size of an FSE-compressed weight stream; otherwise
+        /// `header - 127` weights follow directly, packed two per byte.
+        pub fn decode(in: *Reader, remaining: *Limit) HuffmanTree.DecodeError!HuffmanTree {
+            remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+            const header = try in.takeByte();
+            if (header < 128) {
+                return decodeFse(in, remaining, header);
+            } else {
+                return decodeDirect(in, remaining, header - 127);
+            }
+        }
+
+        /// Read `encoded_symbol_count` 4-bit weights stored directly,
+        /// high nibble first.
+        fn decodeDirect(
+            in: *Reader,
+            remaining: *Limit,
+            encoded_symbol_count: usize,
+        ) HuffmanTree.DecodeError!HuffmanTree {
+            var weights: [256]u4 = undefined;
+            const weights_byte_count = (encoded_symbol_count + 1) / 2;
+            remaining.* = remaining.subtract(weights_byte_count) orelse return error.EndOfStream;
+            for (0..weights_byte_count) |i| {
+                const byte = try in.takeByte();
+                weights[2 * i] = @as(u4, @intCast(byte >> 4));
+                weights[2 * i + 1] = @as(u4, @intCast(byte & 0xF));
+            }
+            // The final symbol's weight is implied and filled in by `build`.
+            const symbol_count = encoded_symbol_count + 1;
+            return build(&weights, symbol_count);
+        }
+
+        /// Decode an FSE table (max accuracy log 6) from the first bytes of
+        /// the compressed payload, then use it to decode the weights from the
+        /// remaining bytes.
+        fn decodeFse(
+            in: *Reader,
+            remaining: *Limit,
+            compressed_size: usize,
+        ) HuffmanTree.DecodeError!HuffmanTree {
+            var weights: [256]u4 = undefined;
+            remaining.* = remaining.subtract(compressed_size) orelse return error.EndOfStream;
+            const compressed_buffer = try in.take(compressed_size);
+            var bit_reader: BitReader = .{ .bytes = compressed_buffer };
+            var entries: [1 << 6]Table.Fse = undefined;
+            const table_size = try Table.decode(&bit_reader, 256, 6, &entries);
+            const accuracy_log = std.math.log2_int_ceil(usize, table_size);
+            const remaining_buffer = bit_reader.bytes[bit_reader.index..];
+            const symbol_count = try assignWeights(remaining_buffer, accuracy_log, &entries, &weights);
+            return build(&weights, symbol_count);
+        }
+
+        /// Decode weights from a reversed bit stream using two interleaved FSE
+        /// states (even/odd). Decoding stops when the stream runs dry mid-state
+        /// update; at most 255 weights may be produced.
+        fn assignWeights(
+            huff_bits_buffer: []const u8,
+            accuracy_log: u16,
+            entries: *[1 << 6]Table.Fse,
+            weights: *[256]u4,
+        ) !usize {
+            var huff_bits = try ReverseBitReader.init(huff_bits_buffer);
+
+            var i: usize = 0;
+            var even_state: u32 = try huff_bits.readBitsNoEof(u32, accuracy_log);
+            var odd_state: u32 = try huff_bits.readBitsNoEof(u32, accuracy_log);
+
+            while (i < 254) {
+                const even_data = entries[even_state];
+                var read_bits: u16 = 0;
+                const even_bits = huff_bits.readBits(u32, even_data.bits, &read_bits) catch unreachable;
+                weights[i] = std.math.cast(u4, even_data.symbol) orelse return error.MalformedHuffmanTree;
+                i += 1;
+                if (read_bits < even_data.bits) {
+                    // Stream exhausted: the opposite state yields the final weight.
+                    weights[i] = std.math.cast(u4, entries[odd_state].symbol) orelse return error.MalformedHuffmanTree;
+                    i += 1;
+                    break;
+                }
+                even_state = even_data.baseline + even_bits;
+
+                read_bits = 0;
+                const odd_data = entries[odd_state];
+                const odd_bits = huff_bits.readBits(u32, odd_data.bits, &read_bits) catch unreachable;
+                weights[i] = std.math.cast(u4, odd_data.symbol) orelse return error.MalformedHuffmanTree;
+                i += 1;
+                if (read_bits < odd_data.bits) {
+                    if (i == 255) return error.MalformedHuffmanTree;
+                    weights[i] = std.math.cast(u4, entries[even_state].symbol) orelse return error.MalformedHuffmanTree;
+                    i += 1;
+                    break;
+                }
+                odd_state = odd_data.baseline + odd_bits;
+            } else return error.MalformedHuffmanTree;
+
+            // All bits must have been consumed exactly.
+            if (!huff_bits.isEmpty()) {
+                return error.MalformedHuffmanTree;
+            }
+
+            return i + 1; // stream contains all but the last symbol
+        }
+
+        /// Sort symbols by weight (stable) and assign numerically increasing
+        /// prefixes within each weight class; zero-weight symbols are skipped.
+        /// Returns the number of symbols that received a prefix.
+        fn assignSymbols(weight_sorted_prefixed_symbols: []PrefixedSymbol, weights: [256]u4) usize {
+            for (0..weight_sorted_prefixed_symbols.len) |i| {
+                weight_sorted_prefixed_symbols[i] = .{
+                    .symbol = @as(u8, @intCast(i)),
+                    .weight = undefined,
+                    .prefix = undefined,
+                };
+            }
+
+            std.mem.sort(
+                PrefixedSymbol,
+                weight_sorted_prefixed_symbols,
+                weights,
+                lessThanByWeight,
+            );
+
+            var prefix: u16 = 0;
+            var prefixed_symbol_count: usize = 0;
+            var sorted_index: usize = 0;
+            const symbol_count = weight_sorted_prefixed_symbols.len;
+            while (sorted_index < symbol_count) {
+                var symbol = weight_sorted_prefixed_symbols[sorted_index].symbol;
+                const weight = weights[symbol];
+                if (weight == 0) {
+                    sorted_index += 1;
+                    continue;
+                }
+
+                while (sorted_index < symbol_count) : ({
+                    sorted_index += 1;
+                    prefixed_symbol_count += 1;
+                    prefix += 1;
+                }) {
+                    symbol = weight_sorted_prefixed_symbols[sorted_index].symbol;
+                    if (weights[symbol] != weight) {
+                        // Crossing into the next weight class: rescale the
+                        // prefix to the wider code length.
+                        prefix = ((prefix - 1) >> (weights[symbol] - weight)) + 1;
+                        break;
+                    }
+                    weight_sorted_prefixed_symbols[prefixed_symbol_count].symbol = symbol;
+                    weight_sorted_prefixed_symbols[prefixed_symbol_count].prefix = prefix;
+                    weight_sorted_prefixed_symbols[prefixed_symbol_count].weight = weight;
+                }
+            }
+            return prefixed_symbol_count;
+        }
+
+        /// Compute the implied last weight (chosen so the weight powers sum to
+        /// a power of two) and build the final tree.
+        fn build(weights: *[256]u4, symbol_count: usize) error{MalformedHuffmanTree}!HuffmanTree {
+            var weight_power_sum_big: u32 = 0;
+            for (weights[0 .. symbol_count - 1]) |value| {
+                weight_power_sum_big += (@as(u16, 1) << value) >> 1;
+            }
+            // Weight powers above 2^11 would require codes longer than allowed.
+            if (weight_power_sum_big >= 1 << 11) return error.MalformedHuffmanTree;
+            const weight_power_sum = @as(u16, @intCast(weight_power_sum_big));
+
+            // advance to next power of two (even if weight_power_sum is a power of 2)
+            // TODO: is it valid to have weight_power_sum == 0?
+            const max_number_of_bits = if (weight_power_sum == 0) 1 else std.math.log2_int(u16, weight_power_sum) + 1;
+            const next_power_of_two = @as(u16, 1) << max_number_of_bits;
+            weights[symbol_count - 1] = std.math.log2_int(u16, next_power_of_two - weight_power_sum) + 1;
+
+            var weight_sorted_prefixed_symbols: [256]PrefixedSymbol = undefined;
+            const prefixed_symbol_count = assignSymbols(weight_sorted_prefixed_symbols[0..symbol_count], weights.*);
+            const tree: HuffmanTree = .{
+                .max_bit_count = max_number_of_bits,
+                .symbol_count_minus_one = @as(u8, @intCast(prefixed_symbol_count - 1)),
+                .nodes = weight_sorted_prefixed_symbols,
+            };
+            return tree;
+        }
+
+        fn lessThanByWeight(
+            weights: [256]u4,
+            lhs: PrefixedSymbol,
+            rhs: PrefixedSymbol,
+        ) bool {
+            // NOTE: this function relies on the use of a stable sorting algorithm,
+            // otherwise a special case of if (weights[lhs] == weights[rhs]) return lhs < rhs;
+            // should be added
+            return weights[lhs.symbol] < weights[rhs.symbol];
+        }
+    };
+
+    pub const StreamCount = enum { one, four };
+    /// Number of literal streams implied by the header fields: compressed
+    /// blocks use four streams unless `size_format == 0`.
+    pub fn streamCount(size_format: u2, block_type: BlockType) StreamCount {
+        return switch (block_type) {
+            .raw, .rle => .one,
+            .compressed, .treeless => if (size_format == 0) .one else .four,
+        };
+    }
+
+    pub const DecodeError = error{
+        /// Invalid header.
+        MalformedLiteralsHeader,
+        /// Decoding errors.
+        MalformedLiteralsSection,
+        /// Compressed literals have invalid accuracy.
+        MalformedAccuracyLog,
+        /// Compressed literals have invalid FSE table.
+        MalformedFseTable,
+        /// Failed decoding a Huffamn tree.
+        MalformedHuffmanTree,
+        /// Not enough bytes to complete the section.
+        EndOfStream,
+        ReadFailed,
+        MissingStartBit,
+    };
+
+    /// Decode a complete literals section from `in` into `buffer`, charging
+    /// all bytes consumed against `remaining`.
+    pub fn decode(in: *Reader, remaining: *Limit, buffer: []u8) DecodeError!LiteralsSection {
+        const header = try Header.decode(in, remaining);
+        switch (header.block_type) {
+            .raw => {
+                if (buffer.len < header.regenerated_size) return error.MalformedLiteralsSection;
+                remaining.* = remaining.subtract(header.regenerated_size) orelse return error.EndOfStream;
+                try in.readSliceAll(buffer[0..header.regenerated_size]);
+                // NOTE(review): returns the whole buffer, not just the filled
+                // prefix — consumers presumably honor header.regenerated_size.
+                return .{
+                    .header = header,
+                    .huffman_tree = null,
+                    .streams = .{ .one = buffer },
+                };
+            },
+            .rle => {
+                // A single byte, to be repeated regenerated_size times.
+                remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+                buffer[0] = try in.takeByte();
+                return .{
+                    .header = header,
+                    .huffman_tree = null,
+                    .streams = .{ .one = buffer[0..1] },
+                };
+            },
+            .compressed, .treeless => {
+                // The Huffman tree (if present) is charged against `remaining`;
+                // the difference tells us how many compressed bytes it used.
+                const before_remaining = remaining.*;
+                const huffman_tree = if (header.block_type == .compressed)
+                    try HuffmanTree.decode(in, remaining)
+                else
+                    null;
+                const huffman_tree_size = @intFromEnum(before_remaining) - @intFromEnum(remaining.*);
+                const total_streams_size = std.math.sub(usize, header.compressed_size.?, huffman_tree_size) catch
+                    return error.MalformedLiteralsSection;
+                if (total_streams_size > buffer.len) return error.MalformedLiteralsSection;
+                remaining.* = remaining.subtract(total_streams_size) orelse return error.EndOfStream;
+                try in.readSliceAll(buffer[0..total_streams_size]);
+                const stream_data = buffer[0..total_streams_size];
+                const streams = try Streams.decode(header.size_format, stream_data);
+                return .{
+                    .header = header,
+                    .huffman_tree = huffman_tree,
+                    .streams = streams,
+                };
+            },
+        }
+    }
+};
+
+/// A decoded sequences section of a zstandard block: the header plus the
+/// three FSE tables used to decode literal lengths, offsets, and match lengths.
+pub const SequencesSection = struct {
+    header: Header,
+    literals_length_table: Table,
+    offset_table: Table,
+    match_length_table: Table,
+
+    pub const Header = struct {
+        sequence_count: u24,
+        match_lengths: Mode,
+        offsets: Mode,
+        literal_lengths: Mode,
+
+        /// How each of the three tables for this section is encoded.
+        pub const Mode = enum(u2) {
+            predefined,
+            rle,
+            fse,
+            repeat,
+        };
+
+        pub const DecodeError = error{
+            ReservedBitSet,
+            EndOfStream,
+            ReadFailed,
+        };
+
+        /// Decode a sequences section header: a 1-3 byte sequence count
+        /// followed by a compression-modes byte (absent when the count is 0).
+        pub fn decode(in: *Reader, remaining: *Limit) DecodeError!Header {
+            var sequence_count: u24 = undefined;
+
+            remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+            const byte0 = try in.takeByte();
+            if (byte0 == 0) {
+                // No sequences: the mode fields are never read.
+                return .{
+                    .sequence_count = 0,
+                    .offsets = undefined,
+                    .match_lengths = undefined,
+                    .literal_lengths = undefined,
+                };
+            } else if (byte0 < 128) {
+                // Each branch's subtraction also covers the compression-modes
+                // byte read below, in addition to any extra count bytes.
+                remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
+                sequence_count = byte0;
+            } else if (byte0 < 255) {
+                remaining.* = remaining.subtract(2) orelse return error.EndOfStream;
+                sequence_count = (@as(u24, (byte0 - 128)) << 8) + try in.takeByte();
+            } else {
+                remaining.* = remaining.subtract(3) orelse return error.EndOfStream;
+                sequence_count = (try in.takeByte()) + (@as(u24, try in.takeByte()) << 8) + 0x7F00;
+            }
+
+            const compression_modes = try in.takeByte();
+
+            const matches_mode: Header.Mode = @enumFromInt((compression_modes & 0b00001100) >> 2);
+            const offsets_mode: Header.Mode = @enumFromInt((compression_modes & 0b00110000) >> 4);
+            const literal_mode: Header.Mode = @enumFromInt((compression_modes & 0b11000000) >> 6);
+            // The low two bits of the modes byte are reserved and must be zero.
+            if (compression_modes & 0b11 != 0) return error.ReservedBitSet;
+
+            return .{
+                .sequence_count = sequence_count,
+                .offsets = offsets_mode,
+                .match_lengths = matches_mode,
+                .literal_lengths = literal_mode,
+            };
+        }
+    };
+};
+
+/// An FSE decoding table, either a full table of states or a single RLE symbol.
+pub const Table = union(enum) {
+    fse: []const Fse,
+    rle: u8,
+
+    /// One FSE state: the symbol it emits, plus the baseline and bit count
+    /// used to compute the next state.
+    pub const Fse = struct {
+        symbol: u8,
+        baseline: u16,
+        bits: u8,
+    };
+
+    /// Decode an FSE table description from `bit_reader` into `entries`.
+    /// Returns the table size (2^accuracy_log). Probabilities are stored
+    /// biased by one; a stored value of 0 denotes a "less than one"
+    /// probability.
+    pub fn decode(
+        bit_reader: *BitReader,
+        expected_symbol_count: usize,
+        max_accuracy_log: u4,
+        entries: []Table.Fse,
+    ) !usize {
+        // The accuracy log is stored minus its minimum of 5.
+        const accuracy_log_biased = try bit_reader.readBitsNoEof(u4, 4);
+        if (accuracy_log_biased > max_accuracy_log -| 5) return error.MalformedAccuracyLog;
+        const accuracy_log = accuracy_log_biased + 5;
+
+        var values: [256]u16 = undefined;
+        var value_count: usize = 0;
+
+        const total_probability = @as(u16, 1) << accuracy_log;
+        var accumulated_probability: u16 = 0;
+
+        while (accumulated_probability < total_probability) {
+            // WARNING: The RFC is poorly worded, and would suggest std.math.log2_int_ceil is correct here,
+            // but power of two (remaining probabilities + 1) need max bits set to 1 more.
+            const max_bits = std.math.log2_int(u16, total_probability - accumulated_probability + 1) + 1;
+            const small = try bit_reader.readBitsNoEof(u16, max_bits - 1);
+
+            const cutoff = (@as(u16, 1) << max_bits) - 1 - (total_probability - accumulated_probability + 1);
+
+            // Values below the cutoff fit in max_bits - 1 bits; otherwise one
+            // extra bit disambiguates the wider range.
+            const value = if (small < cutoff)
+                small
+            else value: {
+                const value_read = small + (try bit_reader.readBitsNoEof(u16, 1) << (max_bits - 1));
+                break :value if (value_read < @as(u16, 1) << (max_bits - 1))
+                    value_read
+                else
+                    value_read - cutoff;
+            };
+
+            // A stored 0 ("less than one" probability) still consumes one slot.
+            accumulated_probability += if (value != 0) value - 1 else 1;
+
+            values[value_count] = value;
+            value_count += 1;
+
+            if (value == 1) {
+                // A probability of zero is followed by 2-bit repeat counts.
+                while (true) {
+                    const repeat_flag = try bit_reader.readBitsNoEof(u2, 2);
+                    if (repeat_flag + value_count > 256) return error.MalformedFseTable;
+                    for (0..repeat_flag) |_| {
+                        values[value_count] = 1;
+                        value_count += 1;
+                    }
+                    if (repeat_flag < 3) break;
+                }
+            }
+            if (value_count == 256) break;
+        }
+        bit_reader.alignToByte();
+
+        if (value_count < 2) return error.MalformedFseTable;
+        if (accumulated_probability != total_probability) return error.MalformedFseTable;
+        if (value_count > expected_symbol_count) return error.MalformedFseTable;
+
+        const table_size = total_probability;
+
+        try build(values[0..value_count], entries[0..table_size]);
+        return table_size;
+    }
+
+    /// Build the state table from biased probability `values` (index = symbol).
+    /// "Less than one" symbols get single cells at the end of the table; other
+    /// symbols are spread through the table with the standard FSE step.
+    pub fn build(values: []const u16, entries: []Table.Fse) !void {
+        const total_probability = @as(u16, @intCast(entries.len));
+        const accuracy_log = std.math.log2_int(u16, total_probability);
+        assert(total_probability <= 1 << 9);
+
+        var less_than_one_count: usize = 0;
+        for (values, 0..) |value, i| {
+            if (value == 0) {
+                // "Less than one": one cell at the table's tail, full-width bits.
+                entries[entries.len - 1 - less_than_one_count] = Table.Fse{
+                    .symbol = @as(u8, @intCast(i)),
+                    .baseline = 0,
+                    .bits = accuracy_log,
+                };
+                less_than_one_count += 1;
+            }
+        }
+
+        var position: usize = 0;
+        var temp_states: [1 << 9]u16 = undefined;
+        for (values, 0..) |value, symbol| {
+            if (value == 0 or value == 1) continue;
+            const probability = value - 1;
+
+            const state_share_dividend = std.math.ceilPowerOfTwo(u16, probability) catch
+                return error.MalformedFseTable;
+            const share_size = @divExact(total_probability, state_share_dividend);
+            const double_state_count = state_share_dividend - probability;
+            const single_state_count = probability - double_state_count;
+            const share_size_log = std.math.log2_int(u16, share_size);
+
+            // Spread the symbol's states with step (size/2 + size/8 + 3),
+            // skipping cells reserved for "less than one" symbols.
+            for (0..probability) |i| {
+                temp_states[i] = @as(u16, @intCast(position));
+                position += (entries.len >> 1) + (entries.len >> 3) + 3;
+                position &= entries.len - 1;
+                while (position >= entries.len - less_than_one_count) {
+                    position += (entries.len >> 1) + (entries.len >> 3) + 3;
+                    position &= entries.len - 1;
+                }
+            }
+            std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16));
+            for (0..probability) |i| {
+                entries[temp_states[i]] = if (i < double_state_count) Table.Fse{
+                    .symbol = @as(u8, @intCast(symbol)),
+                    .bits = share_size_log + 1,
+                    .baseline = single_state_count * share_size + @as(u16, @intCast(i)) * 2 * share_size,
+                } else Table.Fse{
+                    .symbol = @as(u8, @intCast(symbol)),
+                    .bits = share_size_log,
+                    .baseline = (@as(u16, @intCast(i)) - double_state_count) * share_size,
+                };
+            }
+        }
+    }
+
+    // Verifies `build` reproduces the RFC's predefined tables from the
+    // predefined probability distributions.
+    test build {
+        const literals_length_default_values = [36]u16{
+            5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2,
+            3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 2, 2, 2, 2, 2,
+            0, 0, 0, 0,
+        };
+
+        const match_lengths_default_values = [53]u16{
+            2, 5, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,
+            2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+            2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+            0, 0, 0, 0, 0,
+        };
+
+        const offset_codes_default_values = [29]u16{
+            2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,
+            2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0,
+        };
+
+        var entries: [64]Table.Fse = undefined;
+        try build(&literals_length_default_values, &entries);
+        try std.testing.expectEqualSlices(Table.Fse, Table.predefined_literal.fse, &entries);
+
+        try build(&match_lengths_default_values, &entries);
+        try std.testing.expectEqualSlices(Table.Fse, Table.predefined_match.fse, &entries);
+
+        try build(&offset_codes_default_values, entries[0..32]);
+        try std.testing.expectEqualSlices(Table.Fse, Table.predefined_offset.fse, entries[0..32]);
+    }
+
+    /// Predefined literals-length table (accuracy log 6).
+    pub const predefined_literal: Table = .{
+        .fse = &[64]Table.Fse{
+            .{ .symbol = 0, .bits = 4, .baseline = 0 },
+            .{ .symbol = 0, .bits = 4, .baseline = 16 },
+            .{ .symbol = 1, .bits = 5, .baseline = 32 },
+            .{ .symbol = 3, .bits = 5, .baseline = 0 },
+            .{ .symbol = 4, .bits = 5, .baseline = 0 },
+            .{ .symbol = 6, .bits = 5, .baseline = 0 },
+            .{ .symbol = 7, .bits = 5, .baseline = 0 },
+            .{ .symbol = 9, .bits = 5, .baseline = 0 },
+            .{ .symbol = 10, .bits = 5, .baseline = 0 },
+            .{ .symbol = 12, .bits = 5, .baseline = 0 },
+            .{ .symbol = 14, .bits = 6, .baseline = 0 },
+            .{ .symbol = 16, .bits = 5, .baseline = 0 },
+            .{ .symbol = 18, .bits = 5, .baseline = 0 },
+            .{ .symbol = 19, .bits = 5, .baseline = 0 },
+            .{ .symbol = 21, .bits = 5, .baseline = 0 },
+            .{ .symbol = 22, .bits = 5, .baseline = 0 },
+            .{ .symbol = 24, .bits = 5, .baseline = 0 },
+            .{ .symbol = 25, .bits = 5, .baseline = 32 },
+            .{ .symbol = 26, .bits = 5, .baseline = 0 },
+            .{ .symbol = 27, .bits = 6, .baseline = 0 },
+            .{ .symbol = 29, .bits = 6, .baseline = 0 },
+            .{ .symbol = 31, .bits = 6, .baseline = 0 },
+            .{ .symbol = 0, .bits = 4, .baseline = 32 },
+            .{ .symbol = 1, .bits = 4, .baseline = 0 },
+            .{ .symbol = 2, .bits = 5, .baseline = 0 },
+            .{ .symbol = 4, .bits = 5, .baseline = 32 },
+            .{ .symbol = 5, .bits = 5, .baseline = 0 },
+            .{ .symbol = 7, .bits = 5, .baseline = 32 },
+            .{ .symbol = 8, .bits = 5, .baseline = 0 },
+            .{ .symbol = 10, .bits = 5, .baseline = 32 },
+            .{ .symbol = 11, .bits = 5, .baseline = 0 },
+            .{ .symbol = 13, .bits = 6, .baseline = 0 },
+            .{ .symbol = 16, .bits = 5, .baseline = 32 },
+            .{ .symbol = 17, .bits = 5, .baseline = 0 },
+            .{ .symbol = 19, .bits = 5, .baseline = 32 },
+            .{ .symbol = 20, .bits = 5, .baseline = 0 },
+            .{ .symbol = 22, .bits = 5, .baseline = 32 },
+            .{ .symbol = 23, .bits = 5, .baseline = 0 },
+            .{ .symbol = 25, .bits = 4, .baseline = 0 },
+            .{ .symbol = 25, .bits = 4, .baseline = 16 },
+            .{ .symbol = 26, .bits = 5, .baseline = 32 },
+            .{ .symbol = 28, .bits = 6, .baseline = 0 },
+            .{ .symbol = 30, .bits = 6, .baseline = 0 },
+            .{ .symbol = 0, .bits = 4, .baseline = 48 },
+            .{ .symbol = 1, .bits = 4, .baseline = 16 },
+            .{ .symbol = 2, .bits = 5, .baseline = 32 },
+            .{ .symbol = 3, .bits = 5, .baseline = 32 },
+            .{ .symbol = 5, .bits = 5, .baseline = 32 },
+            .{ .symbol = 6, .bits = 5, .baseline = 32 },
+            .{ .symbol = 8, .bits = 5, .baseline = 32 },
+            .{ .symbol = 9, .bits = 5, .baseline = 32 },
+            .{ .symbol = 11, .bits = 5, .baseline = 32 },
+            .{ .symbol = 12, .bits = 5, .baseline = 32 },
+            .{ .symbol = 15, .bits = 6, .baseline = 0 },
+            .{ .symbol = 17, .bits = 5, .baseline = 32 },
+            .{ .symbol = 18, .bits = 5, .baseline = 32 },
+            .{ .symbol = 20, .bits = 5, .baseline = 32 },
+            .{ .symbol = 21, .bits = 5, .baseline = 32 },
+            .{ .symbol = 23, .bits = 5, .baseline = 32 },
+            .{ .symbol = 24, .bits = 5, .baseline = 32 },
+            .{ .symbol = 35, .bits = 6, .baseline = 0 },
+            .{ .symbol = 34, .bits = 6, .baseline = 0 },
+            .{ .symbol = 33, .bits = 6, .baseline = 0 },
+            .{ .symbol = 32, .bits = 6, .baseline = 0 },
+        },
+    };
+
+    /// Predefined match-length table (accuracy log 6).
+    pub const predefined_match: Table = .{
+        .fse = &[64]Table.Fse{
+            .{ .symbol = 0, .bits = 6, .baseline = 0 },
+            .{ .symbol = 1, .bits = 4, .baseline = 0 },
+            .{ .symbol = 2, .bits = 5, .baseline = 32 },
+            .{ .symbol = 3, .bits = 5, .baseline = 0 },
+            .{ .symbol = 5, .bits = 5, .baseline = 0 },
+            .{ .symbol = 6, .bits = 5, .baseline = 0 },
+            .{ .symbol = 8, .bits = 5, .baseline = 0 },
+            .{ .symbol = 10, .bits = 6, .baseline = 0 },
+            .{ .symbol = 13, .bits = 6, .baseline = 0 },
+            .{ .symbol = 16, .bits = 6, .baseline = 0 },
+            .{ .symbol = 19, .bits = 6, .baseline = 0 },
+            .{ .symbol = 22, .bits = 6, .baseline = 0 },
+            .{ .symbol = 25, .bits = 6, .baseline = 0 },
+            .{ .symbol = 28, .bits = 6, .baseline = 0 },
+            .{ .symbol = 31, .bits = 6, .baseline = 0 },
+            .{ .symbol = 33, .bits = 6, .baseline = 0 },
+            .{ .symbol = 35, .bits = 6, .baseline = 0 },
+            .{ .symbol = 37, .bits = 6, .baseline = 0 },
+            .{ .symbol = 39, .bits = 6, .baseline = 0 },
+            .{ .symbol = 41, .bits = 6, .baseline = 0 },
+            .{ .symbol = 43, .bits = 6, .baseline = 0 },
+            .{ .symbol = 45, .bits = 6, .baseline = 0 },
+            .{ .symbol = 1, .bits = 4, .baseline = 16 },
+            .{ .symbol = 2, .bits = 4, .baseline = 0 },
+            .{ .symbol = 3, .bits = 5, .baseline = 32 },
+            .{ .symbol = 4, .bits = 5, .baseline = 0 },
+            .{ .symbol = 6, .bits = 5, .baseline = 32 },
+            .{ .symbol = 7, .bits = 5, .baseline = 0 },
+            .{ .symbol = 9, .bits = 6, .baseline = 0 },
+            .{ .symbol = 12, .bits = 6, .baseline = 0 },
+            .{ .symbol = 15, .bits = 6, .baseline = 0 },
+            .{ .symbol = 18, .bits = 6, .baseline = 0 },
+            .{ .symbol = 21, .bits = 6, .baseline = 0 },
+            .{ .symbol = 24, .bits = 6, .baseline = 0 },
+            .{ .symbol = 27, .bits = 6, .baseline = 0 },
+            .{ .symbol = 30, .bits = 6, .baseline = 0 },
+            .{ .symbol = 32, .bits = 6, .baseline = 0 },
+            .{ .symbol = 34, .bits = 6, .baseline = 0 },
+            .{ .symbol = 36, .bits = 6, .baseline = 0 },
+            .{ .symbol = 38, .bits = 6, .baseline = 0 },
+            .{ .symbol = 40, .bits = 6, .baseline = 0 },
+            .{ .symbol = 42, .bits = 6, .baseline = 0 },
+            .{ .symbol = 44, .bits = 6, .baseline = 0 },
+            .{ .symbol = 1, .bits = 4, .baseline = 32 },
+            .{ .symbol = 1, .bits = 4, .baseline = 48 },
+            .{ .symbol = 2, .bits = 4, .baseline = 16 },
+            .{ .symbol = 4, .bits = 5, .baseline = 32 },
+            .{ .symbol = 5, .bits = 5, .baseline = 32 },
+            .{ .symbol = 7, .bits = 5, .baseline = 32 },
+            .{ .symbol = 8, .bits = 5, .baseline = 32 },
+            .{ .symbol = 11, .bits = 6, .baseline = 0 },
+            .{ .symbol = 14, .bits = 6, .baseline = 0 },
+            .{ .symbol = 17, .bits = 6, .baseline = 0 },
+            .{ .symbol = 20, .bits = 6, .baseline = 0 },
+            .{ .symbol = 23, .bits = 6, .baseline = 0 },
+            .{ .symbol = 26, .bits = 6, .baseline = 0 },
+            .{ .symbol = 29, .bits = 6, .baseline = 0 },
+            .{ .symbol = 52, .bits = 6, .baseline = 0 },
+            .{ .symbol = 51, .bits = 6, .baseline = 0 },
+            .{ .symbol = 50, .bits = 6, .baseline = 0 },
+            .{ .symbol = 49, .bits = 6, .baseline = 0 },
+            .{ .symbol = 48, .bits = 6, .baseline = 0 },
+            .{ .symbol = 47, .bits = 6, .baseline = 0 },
+            .{ .symbol = 46, .bits = 6, .baseline = 0 },
+        },
+    };
+
+    /// Predefined offset-codes table (accuracy log 5).
+    pub const predefined_offset: Table = .{
+        .fse = &[32]Table.Fse{
+            .{ .symbol = 0, .bits = 5, .baseline = 0 },
+            .{ .symbol = 6, .bits = 4, .baseline = 0 },
+            .{ .symbol = 9, .bits = 5, .baseline = 0 },
+            .{ .symbol = 15, .bits = 5, .baseline = 0 },
+            .{ .symbol = 21, .bits = 5, .baseline = 0 },
+            .{ .symbol = 3, .bits = 5, .baseline = 0 },
+            .{ .symbol = 7, .bits = 4, .baseline = 0 },
+            .{ .symbol = 12, .bits = 5, .baseline = 0 },
+            .{ .symbol = 18, .bits = 5, .baseline = 0 },
+            .{ .symbol = 23, .bits = 5, .baseline = 0 },
+            .{ .symbol = 5, .bits = 5, .baseline = 0 },
+            .{ .symbol = 8, .bits = 4, .baseline = 0 },
+            .{ .symbol = 14, .bits = 5, .baseline = 0 },
+            .{ .symbol = 20, .bits = 5, .baseline = 0 },
+            .{ .symbol = 2, .bits = 5, .baseline = 0 },
+            .{ .symbol = 7, .bits = 4, .baseline = 16 },
+            .{ .symbol = 11, .bits = 5, .baseline = 0 },
+            .{ .symbol = 17, .bits = 5, .baseline = 0 },
+            .{ .symbol = 22, .bits = 5, .baseline = 0 },
+            .{ .symbol = 4, .bits = 5, .baseline = 0 },
+            .{ .symbol = 8, .bits = 4, .baseline = 16 },
+            .{ .symbol = 13, .bits = 5, .baseline = 0 },
+            .{ .symbol = 19, .bits = 5, .baseline = 0 },
+            .{ .symbol = 1, .bits = 5, .baseline = 0 },
+            .{ .symbol = 6, .bits = 4, .baseline = 16 },
+            .{ .symbol = 10, .bits = 5, .baseline = 0 },
+            .{ .symbol = 16, .bits = 5, .baseline = 0 },
+            .{ .symbol = 28, .bits = 5, .baseline = 0 },
+            .{ .symbol = 27, .bits = 5, .baseline = 0 },
+            .{ .symbol = 26, .bits = 5, .baseline = 0 },
+            .{ .symbol = 25, .bits = 5, .baseline = 0 },
+            .{ .symbol = 24, .bits = 5, .baseline = 0 },
+        },
+    };
+};
+
+/// low_bit_mask[n] has the low n bits set. Used by the bit readers below to
+/// keep only the unread portion of a partially-consumed byte.
+const low_bit_mask = [9]u8{
+    0b00000000,
+    0b00000001,
+    0b00000011,
+    0b00000111,
+    0b00001111,
+    0b00011111,
+    0b00111111,
+    0b01111111,
+    0b11111111,
+};
+
+/// Tuple of (value read, number of bits actually read), as returned by the
+/// bit readers' `readBitsTuple`.
+fn Bits(comptime T: type) type {
+    return struct { T, u16 };
+}
+
+/// For reading the reversed bit streams used to encode FSE compressed data.
+/// Bytes are consumed from the end of the buffer toward the start; within the
+/// buffered byte, bits are taken most-significant first.
+const ReverseBitReader = struct {
+    bytes: []const u8,
+    /// Number of bytes not yet consumed (moves toward 0).
+    remaining: usize,
+    /// Bits buffered from the most recently consumed byte.
+    bits: u8,
+    /// Number of valid bits in `bits`.
+    count: u4,
+
+    /// Discards leading zero bits up to and including the first set bit,
+    /// which marks the start of the stream. Fails if no set bit is found in
+    /// the first 8 bits; an empty buffer is returned as-is.
+    fn init(bytes: []const u8) error{MissingStartBit}!ReverseBitReader {
+        var result: ReverseBitReader = .{
+            .bytes = bytes,
+            .remaining = bytes.len,
+            .bits = 0,
+            .count = 0,
+        };
+        if (bytes.len == 0) return result;
+        for (0..8) |_| if (0 != (result.readBitsNoEof(u1, 1) catch unreachable)) return result;
+        return error.MissingStartBit;
+    }
+
+    /// Packages (value, bit count) into a `Bits(T)` tuple, bitcasting through
+    /// the unsigned integer of T's width.
+    fn initBits(comptime T: type, out: anytype, num: u16) Bits(T) {
+        const UT = std.meta.Int(.unsigned, @bitSizeOf(T));
+        return .{
+            @bitCast(@as(UT, @intCast(out))),
+            num,
+        };
+    }
+
+    /// Reads exactly `num` bits or returns `error.EndOfStream`.
+    fn readBitsNoEof(self: *ReverseBitReader, comptime T: type, num: u16) error{EndOfStream}!T {
+        const b, const c = try self.readBitsTuple(T, num);
+        if (c < num) return error.EndOfStream;
+        return b;
+    }
+
+    /// Reads up to `num` bits, storing the count actually read in `out_bits`.
+    fn readBits(self: *ReverseBitReader, comptime T: type, num: u16, out_bits: *u16) !T {
+        const b, const c = try self.readBitsTuple(T, num);
+        out_bits.* = c;
+        return b;
+    }
+
+    /// Core read: drains buffered bits, then whole bytes, then a final
+    /// partial byte, returning however many bits were available.
+    fn readBitsTuple(self: *ReverseBitReader, comptime T: type, num: u16) !Bits(T) {
+        const UT = std.meta.Int(.unsigned, @bitSizeOf(T));
+        const U = if (@bitSizeOf(T) < 8) u8 else UT;
+
+        // Fast path: the request fits entirely in the buffered bits.
+        if (num <= self.count) return initBits(T, self.removeBits(@intCast(num)), num);
+
+        var out_count: u16 = self.count;
+        var out: U = self.removeBits(self.count);
+
+        const full_bytes_left = (num - out_count) / 8;
+
+        for (0..full_bytes_left) |_| {
+            const byte = takeByte(self) catch |err| switch (err) {
+                error.EndOfStream => return initBits(T, out, out_count),
+            };
+            if (U == u8) out = 0 else out <<= 8;
+            out |= byte;
+            out_count += 8;
+        }
+
+        const bits_left = num - out_count;
+        const keep = 8 - bits_left;
+
+        if (bits_left == 0) return initBits(T, out, out_count);
+
+        const final_byte = takeByte(self) catch |err| switch (err) {
+            error.EndOfStream => return initBits(T, out, out_count),
+        };
+
+        // Take the high `bits_left` bits; buffer the low `keep` bits.
+        out <<= @intCast(bits_left);
+        out |= final_byte >> @intCast(keep);
+        self.bits = final_byte & low_bit_mask[keep];
+
+        self.count = @intCast(keep);
+        return initBits(T, out, num);
+    }
+
+    /// Consumes the next byte, walking backwards from the end of `bytes`.
+    fn takeByte(rbr: *ReverseBitReader) error{EndOfStream}!u8 {
+        if (rbr.remaining == 0) return error.EndOfStream;
+        rbr.remaining -= 1;
+        return rbr.bytes[rbr.remaining];
+    }
+
+    /// True when every byte and every buffered bit has been consumed.
+    fn isEmpty(self: *const ReverseBitReader) bool {
+        return self.remaining == 0 and self.count == 0;
+    }
+
+    /// Removes `num` (<= self.count) buffered bits, high bits first.
+    fn removeBits(self: *ReverseBitReader, num: u4) u8 {
+        if (num == 8) {
+            self.count = 0;
+            return self.bits;
+        }
+
+        const keep = self.count - num;
+        const bits = self.bits >> @intCast(keep);
+        self.bits &= low_bit_mask[keep];
+
+        self.count = keep;
+        return bits;
+    }
+};
+
+/// Forward bit reader: bytes are consumed front to back, bits within each
+/// byte least-significant first (the normal zstandard/FSE bit order for
+/// table descriptions).
+const BitReader = struct {
+    bytes: []const u8,
+    /// Index of the next unread byte.
+    index: usize = 0,
+    /// Bits buffered from the most recently consumed byte.
+    bits: u8 = 0,
+    /// Number of valid bits in `bits`.
+    count: u4 = 0,
+
+    /// Packages (value, bit count) into a `Bits(T)` tuple, bitcasting through
+    /// the unsigned integer of T's width.
+    fn initBits(comptime T: type, out: anytype, num: u16) Bits(T) {
+        const UT = std.meta.Int(.unsigned, @bitSizeOf(T));
+        return .{
+            @bitCast(@as(UT, @intCast(out))),
+            num,
+        };
+    }
+
+    /// Reads exactly `num` bits or returns `error.EndOfStream`.
+    fn readBitsNoEof(self: *@This(), comptime T: type, num: u16) !T {
+        const b, const c = try self.readBitsTuple(T, num);
+        if (c < num) return error.EndOfStream;
+        return b;
+    }
+
+    /// Reads up to `num` bits, storing the count actually read in `out_bits`.
+    fn readBits(self: *@This(), comptime T: type, num: u16, out_bits: *u16) !T {
+        const b, const c = try self.readBitsTuple(T, num);
+        out_bits.* = c;
+        return b;
+    }
+
+    /// Core read: drains buffered bits, then whole bytes, then a final
+    /// partial byte, returning however many bits were available. New bits are
+    /// placed above previously read ones (little-endian bit packing).
+    fn readBitsTuple(self: *@This(), comptime T: type, num: u16) !Bits(T) {
+        const UT = std.meta.Int(.unsigned, @bitSizeOf(T));
+        const U = if (@bitSizeOf(T) < 8) u8 else UT;
+
+        // Fast path: the request fits entirely in the buffered bits.
+        if (num <= self.count) return initBits(T, self.removeBits(@intCast(num)), num);
+
+        var out_count: u16 = self.count;
+        var out: U = self.removeBits(self.count);
+
+        const full_bytes_left = (num - out_count) / 8;
+
+        for (0..full_bytes_left) |_| {
+            const byte = takeByte(self) catch |err| switch (err) {
+                error.EndOfStream => return initBits(T, out, out_count),
+            };
+
+            const pos = @as(U, byte) << @intCast(out_count);
+            out |= pos;
+            out_count += 8;
+        }
+
+        const bits_left = num - out_count;
+        const keep = 8 - bits_left;
+
+        if (bits_left == 0) return initBits(T, out, out_count);
+
+        const final_byte = takeByte(self) catch |err| switch (err) {
+            error.EndOfStream => return initBits(T, out, out_count),
+        };
+
+        // Take the low `bits_left` bits; buffer the high `keep` bits.
+        const pos = @as(U, final_byte & low_bit_mask[bits_left]) << @intCast(out_count);
+        out |= pos;
+        self.bits = final_byte >> @intCast(bits_left);
+
+        self.count = @intCast(keep);
+        return initBits(T, out, num);
+    }
+
+    /// Consumes the next byte in forward order.
+    fn takeByte(br: *BitReader) error{EndOfStream}!u8 {
+        if (br.bytes.len - br.index == 0) return error.EndOfStream;
+        const result = br.bytes[br.index];
+        br.index += 1;
+        return result;
+    }
+
+    /// Removes `num` (<= self.count) buffered bits, low bits first.
+    fn removeBits(self: *@This(), num: u4) u8 {
+        if (num == 8) {
+            self.count = 0;
+            return self.bits;
+        }
+
+        const keep = self.count - num;
+        const bits = self.bits & low_bit_mask[num];
+        self.bits >>= @intCast(num);
+        self.count = keep;
+        return bits;
+    }
+
+    /// Discards any buffered bits so the next read starts on a byte boundary.
+    fn alignToByte(self: *@This()) void {
+        self.bits = 0;
+        self.count = 0;
+    }
+};
+
+test {
+    // Referencing Table makes its nested tests (e.g. `test build`) run as
+    // part of this file's test suite.
+    _ = Table;
+}
diff --git a/lib/std/crypto/md5.zig b/lib/std/crypto/md5.zig
index 92c8dac796..a580f826f3 100644
--- a/lib/std/crypto/md5.zig
+++ b/lib/std/crypto/md5.zig
@@ -54,12 +54,20 @@ pub const Md5 = struct {
};
}
- pub fn hash(b: []const u8, out: *[digest_length]u8, options: Options) void {
+ pub fn hash(data: []const u8, out: *[digest_length]u8, options: Options) void {
var d = Md5.init(options);
- d.update(b);
+ d.update(data);
d.final(out);
}
+ pub fn hashResult(data: []const u8) [digest_length]u8 {
+ var out: [digest_length]u8 = undefined;
+ var d = Md5.init(.{});
+ d.update(data);
+ d.final(&out);
+ return out;
+ }
+
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index 4e15cd3a09..2583e83d19 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -482,6 +482,7 @@ pub const Header = struct {
is_64: bool,
endian: std.builtin.Endian,
os_abi: OSABI,
+ /// The meaning of this value depends on `os_abi`.
abi_version: u8,
type: ET,
machine: EM,
@@ -494,205 +495,135 @@ pub const Header = struct {
shnum: u16,
shstrndx: u16,
- pub fn program_header_iterator(self: Header, parse_source: anytype) ProgramHeaderIterator(@TypeOf(parse_source)) {
- return ProgramHeaderIterator(@TypeOf(parse_source)){
- .elf_header = self,
- .parse_source = parse_source,
+ pub fn iterateProgramHeaders(h: Header, file_reader: *std.fs.File.Reader) ProgramHeaderIterator {
+ return .{
+ .elf_header = h,
+ .file_reader = file_reader,
};
}
- pub fn section_header_iterator(self: Header, parse_source: anytype) SectionHeaderIterator(@TypeOf(parse_source)) {
- return SectionHeaderIterator(@TypeOf(parse_source)){
- .elf_header = self,
- .parse_source = parse_source,
+ pub fn iterateSectionHeaders(h: Header, file_reader: *std.fs.File.Reader) SectionHeaderIterator {
+ return .{
+ .elf_header = h,
+ .file_reader = file_reader,
};
}
- pub fn read(parse_source: anytype) !Header {
- var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
- try parse_source.seekableStream().seekTo(0);
- try parse_source.deprecatedReader().readNoEof(&hdr_buf);
- return Header.parse(&hdr_buf);
- }
+ pub const ReadError = std.Io.Reader.Error || error{
+ InvalidElfMagic,
+ InvalidElfVersion,
+ InvalidElfClass,
+ InvalidElfEndian,
+ };
- pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header {
- const hdr32 = @as(*const Elf32_Ehdr, @ptrCast(hdr_buf));
- const hdr64 = @as(*const Elf64_Ehdr, @ptrCast(hdr_buf));
- if (!mem.eql(u8, hdr32.e_ident[0..4], MAGIC)) return error.InvalidElfMagic;
- if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
+ pub fn read(r: *std.Io.Reader) ReadError!Header {
+ const buf = try r.peek(@sizeOf(Elf64_Ehdr));
- const is_64 = switch (hdr32.e_ident[EI_CLASS]) {
- ELFCLASS32 => false,
- ELFCLASS64 => true,
- else => return error.InvalidElfClass,
- };
+ if (!mem.eql(u8, buf[0..4], MAGIC)) return error.InvalidElfMagic;
+ if (buf[EI_VERSION] != 1) return error.InvalidElfVersion;
- const endian: std.builtin.Endian = switch (hdr32.e_ident[EI_DATA]) {
+ const endian: std.builtin.Endian = switch (buf[EI_DATA]) {
ELFDATA2LSB => .little,
ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
};
- const need_bswap = endian != native_endian;
+ return switch (buf[EI_CLASS]) {
+ ELFCLASS32 => .init(try r.takeStruct(Elf32_Ehdr, endian), endian),
+ ELFCLASS64 => .init(try r.takeStruct(Elf64_Ehdr, endian), endian),
+ else => return error.InvalidElfClass,
+ };
+ }
+
+ pub fn init(hdr: anytype, endian: std.builtin.Endian) Header {
// Converting integers to exhaustive enums using `@enumFromInt` could cause a panic.
comptime assert(!@typeInfo(OSABI).@"enum".is_exhaustive);
- const os_abi: OSABI = @enumFromInt(hdr32.e_ident[EI_OSABI]);
+ return .{
+ .is_64 = switch (@TypeOf(hdr)) {
+ Elf32_Ehdr => false,
+ Elf64_Ehdr => true,
+ else => @compileError("bad type"),
+ },
+ .endian = endian,
+ .os_abi = @enumFromInt(hdr.e_ident[EI_OSABI]),
+ .abi_version = hdr.e_ident[EI_ABIVERSION],
+ .type = hdr.e_type,
+ .machine = hdr.e_machine,
+ .entry = hdr.e_entry,
+ .phoff = hdr.e_phoff,
+ .shoff = hdr.e_shoff,
+ .phentsize = hdr.e_phentsize,
+ .phnum = hdr.e_phnum,
+ .shentsize = hdr.e_shentsize,
+ .shnum = hdr.e_shnum,
+ .shstrndx = hdr.e_shstrndx,
+ };
+ }
+};
- // The meaning of this value depends on `os_abi` so just make it available as `u8`.
- const abi_version = hdr32.e_ident[EI_ABIVERSION];
+pub const ProgramHeaderIterator = struct {
+ elf_header: Header,
+ file_reader: *std.fs.File.Reader,
+ index: usize = 0,
- const @"type" = if (need_bswap) blk: {
- comptime assert(!@typeInfo(ET).@"enum".is_exhaustive);
- const value = @intFromEnum(hdr32.e_type);
- break :blk @as(ET, @enumFromInt(@byteSwap(value)));
- } else hdr32.e_type;
+ pub fn next(it: *ProgramHeaderIterator) !?Elf64_Phdr {
+ if (it.index >= it.elf_header.phnum) return null;
+ defer it.index += 1;
- const machine = if (need_bswap) blk: {
- comptime assert(!@typeInfo(EM).@"enum".is_exhaustive);
- const value = @intFromEnum(hdr32.e_machine);
- break :blk @as(EM, @enumFromInt(@byteSwap(value)));
- } else hdr32.e_machine;
+ if (it.elf_header.is_64) {
+ const offset = it.elf_header.phoff + @sizeOf(Elf64_Phdr) * it.index;
+ try it.file_reader.seekTo(offset);
+ const phdr = try it.file_reader.interface.takeStruct(Elf64_Phdr, it.elf_header.endian);
+ return phdr;
+ }
- return @as(Header, .{
- .is_64 = is_64,
- .endian = endian,
- .os_abi = os_abi,
- .abi_version = abi_version,
- .type = @"type",
- .machine = machine,
- .entry = int(is_64, need_bswap, hdr32.e_entry, hdr64.e_entry),
- .phoff = int(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff),
- .shoff = int(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff),
- .phentsize = int(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize),
- .phnum = int(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum),
- .shentsize = int(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize),
- .shnum = int(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum),
- .shstrndx = int(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx),
- });
+ const offset = it.elf_header.phoff + @sizeOf(Elf32_Phdr) * it.index;
+ try it.file_reader.seekTo(offset);
+ const phdr = try it.file_reader.interface.takeStruct(Elf32_Phdr, it.elf_header.endian);
+ return .{
+ .p_type = phdr.p_type,
+ .p_offset = phdr.p_offset,
+ .p_vaddr = phdr.p_vaddr,
+ .p_paddr = phdr.p_paddr,
+ .p_filesz = phdr.p_filesz,
+ .p_memsz = phdr.p_memsz,
+ .p_flags = phdr.p_flags,
+ .p_align = phdr.p_align,
+ };
}
};
-pub fn ProgramHeaderIterator(comptime ParseSource: anytype) type {
- return struct {
- elf_header: Header,
- parse_source: ParseSource,
- index: usize = 0,
-
- pub fn next(self: *@This()) !?Elf64_Phdr {
- if (self.index >= self.elf_header.phnum) return null;
- defer self.index += 1;
-
- if (self.elf_header.is_64) {
- var phdr: Elf64_Phdr = undefined;
- const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
- try self.parse_source.seekableStream().seekTo(offset);
- try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&phdr));
-
- // ELF endianness matches native endianness.
- if (self.elf_header.endian == native_endian) return phdr;
-
- // Convert fields to native endianness.
- mem.byteSwapAllFields(Elf64_Phdr, &phdr);
- return phdr;
- }
-
- var phdr: Elf32_Phdr = undefined;
- const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
- try self.parse_source.seekableStream().seekTo(offset);
- try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&phdr));
-
- // ELF endianness does NOT match native endianness.
- if (self.elf_header.endian != native_endian) {
- // Convert fields to native endianness.
- mem.byteSwapAllFields(Elf32_Phdr, &phdr);
- }
-
- // Convert 32-bit header to 64-bit.
- return Elf64_Phdr{
- .p_type = phdr.p_type,
- .p_offset = phdr.p_offset,
- .p_vaddr = phdr.p_vaddr,
- .p_paddr = phdr.p_paddr,
- .p_filesz = phdr.p_filesz,
- .p_memsz = phdr.p_memsz,
- .p_flags = phdr.p_flags,
- .p_align = phdr.p_align,
- };
- }
- };
-}
+pub const SectionHeaderIterator = struct {
+ elf_header: Header,
+ file_reader: *std.fs.File.Reader,
+ index: usize = 0,
-pub fn SectionHeaderIterator(comptime ParseSource: anytype) type {
- return struct {
- elf_header: Header,
- parse_source: ParseSource,
- index: usize = 0,
-
- pub fn next(self: *@This()) !?Elf64_Shdr {
- if (self.index >= self.elf_header.shnum) return null;
- defer self.index += 1;
-
- if (self.elf_header.is_64) {
- var shdr: Elf64_Shdr = undefined;
- const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
- try self.parse_source.seekableStream().seekTo(offset);
- try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&shdr));
-
- // ELF endianness matches native endianness.
- if (self.elf_header.endian == native_endian) return shdr;
-
- // Convert fields to native endianness.
- mem.byteSwapAllFields(Elf64_Shdr, &shdr);
- return shdr;
- }
-
- var shdr: Elf32_Shdr = undefined;
- const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
- try self.parse_source.seekableStream().seekTo(offset);
- try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&shdr));
-
- // ELF endianness does NOT match native endianness.
- if (self.elf_header.endian != native_endian) {
- // Convert fields to native endianness.
- mem.byteSwapAllFields(Elf32_Shdr, &shdr);
- }
-
- // Convert 32-bit header to 64-bit.
- return Elf64_Shdr{
- .sh_name = shdr.sh_name,
- .sh_type = shdr.sh_type,
- .sh_flags = shdr.sh_flags,
- .sh_addr = shdr.sh_addr,
- .sh_offset = shdr.sh_offset,
- .sh_size = shdr.sh_size,
- .sh_link = shdr.sh_link,
- .sh_info = shdr.sh_info,
- .sh_addralign = shdr.sh_addralign,
- .sh_entsize = shdr.sh_entsize,
- };
- }
- };
-}
+ pub fn next(it: *SectionHeaderIterator) !?Elf64_Shdr {
+ if (it.index >= it.elf_header.shnum) return null;
+ defer it.index += 1;
-fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
- if (is_64) {
- if (need_bswap) {
- return @byteSwap(int_64);
- } else {
- return int_64;
+ if (it.elf_header.is_64) {
+ try it.file_reader.seekTo(it.elf_header.shoff + @sizeOf(Elf64_Shdr) * it.index);
+ const shdr = try it.file_reader.interface.takeStruct(Elf64_Shdr, it.elf_header.endian);
+ return shdr;
}
- } else {
- return int32(need_bswap, int_32, @TypeOf(int_64));
- }
-}
-fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 {
- if (need_bswap) {
- return @byteSwap(int_32);
- } else {
- return int_32;
+ try it.file_reader.seekTo(it.elf_header.shoff + @sizeOf(Elf32_Shdr) * it.index);
+ const shdr = try it.file_reader.interface.takeStruct(Elf32_Shdr, it.elf_header.endian);
+ return .{
+ .sh_name = shdr.sh_name,
+ .sh_type = shdr.sh_type,
+ .sh_flags = shdr.sh_flags,
+ .sh_addr = shdr.sh_addr,
+ .sh_offset = shdr.sh_offset,
+ .sh_size = shdr.sh_size,
+ .sh_link = shdr.sh_link,
+ .sh_info = shdr.sh_info,
+ .sh_addralign = shdr.sh_addralign,
+ .sh_entsize = shdr.sh_entsize,
+ };
}
-}
+};
pub const ELFCLASSNONE = 0;
pub const ELFCLASS32 = 1;
@@ -2070,7 +2001,7 @@ pub const R_AARCH64 = enum(u32) {
TLSLE_LDST64_TPREL_LO12 = 558,
/// Likewise; no check.
TLSLE_LDST64_TPREL_LO12_NC = 559,
- /// PC-rel. load immediate 20:2.
+ /// PC-rel. load immediate 20:2.
TLSDESC_LD_PREL19 = 560,
/// PC-rel. ADR immediate 20:0.
TLSDESC_ADR_PREL21 = 561,
diff --git a/lib/std/fs/AtomicFile.zig b/lib/std/fs/AtomicFile.zig
index 17a17f8993..96793aec72 100644
--- a/lib/std/fs/AtomicFile.zig
+++ b/lib/std/fs/AtomicFile.zig
@@ -1,6 +1,13 @@
-file: File,
-// TODO either replace this with rand_buf or use []u16 on Windows
-tmp_path_buf: [tmp_path_len:0]u8,
+const AtomicFile = @This();
+const std = @import("../std.zig");
+const File = std.fs.File;
+const Dir = std.fs.Dir;
+const fs = std.fs;
+const assert = std.debug.assert;
+const posix = std.posix;
+
+file_writer: File.Writer,
+random_integer: u64,
dest_basename: []const u8,
file_open: bool,
file_exists: bool,
@@ -9,35 +16,24 @@ dir: Dir,
pub const InitError = File.OpenError;
-pub const random_bytes_len = 12;
-const tmp_path_len = fs.base64_encoder.calcSize(random_bytes_len);
-
/// Note that the `Dir.atomicFile` API may be more handy than this lower-level function.
pub fn init(
dest_basename: []const u8,
mode: File.Mode,
dir: Dir,
close_dir_on_deinit: bool,
+ write_buffer: []u8,
) InitError!AtomicFile {
- var rand_buf: [random_bytes_len]u8 = undefined;
- var tmp_path_buf: [tmp_path_len:0]u8 = undefined;
-
while (true) {
- std.crypto.random.bytes(rand_buf[0..]);
- const tmp_path = fs.base64_encoder.encode(&tmp_path_buf, &rand_buf);
- tmp_path_buf[tmp_path.len] = 0;
-
- const file = dir.createFile(
- tmp_path,
- .{ .mode = mode, .exclusive = true },
- ) catch |err| switch (err) {
+ const random_integer = std.crypto.random.int(u64);
+ const tmp_sub_path = std.fmt.hex(random_integer);
+ const file = dir.createFile(&tmp_sub_path, .{ .mode = mode, .exclusive = true }) catch |err| switch (err) {
error.PathAlreadyExists => continue,
else => |e| return e,
};
-
- return AtomicFile{
- .file = file,
- .tmp_path_buf = tmp_path_buf,
+ return .{
+ .file_writer = file.writer(write_buffer),
+ .random_integer = random_integer,
.dest_basename = dest_basename,
.file_open = true,
.file_exists = true,
@@ -48,41 +44,51 @@ pub fn init(
}
/// Always call deinit, even after a successful finish().
-pub fn deinit(self: *AtomicFile) void {
- if (self.file_open) {
- self.file.close();
- self.file_open = false;
+pub fn deinit(af: *AtomicFile) void {
+ if (af.file_open) {
+ af.file_writer.file.close();
+ af.file_open = false;
}
- if (self.file_exists) {
- self.dir.deleteFile(&self.tmp_path_buf) catch {};
- self.file_exists = false;
+ if (af.file_exists) {
+ const tmp_sub_path = std.fmt.hex(af.random_integer);
+ af.dir.deleteFile(&tmp_sub_path) catch {};
+ af.file_exists = false;
}
- if (self.close_dir_on_deinit) {
- self.dir.close();
+ if (af.close_dir_on_deinit) {
+ af.dir.close();
}
- self.* = undefined;
+ af.* = undefined;
}
-pub const FinishError = posix.RenameError;
+pub const FlushError = File.WriteError;
+
+pub fn flush(af: *AtomicFile) FlushError!void {
+ af.file_writer.interface.flush() catch |err| switch (err) {
+ error.WriteFailed => return af.file_writer.err.?,
+ };
+}
+
+pub const RenameIntoPlaceError = posix.RenameError;
/// On Windows, this function introduces a period of time where some file
/// system operations on the destination file will result in
/// `error.AccessDenied`, including rename operations (such as the one used in
/// this function).
-pub fn finish(self: *AtomicFile) FinishError!void {
- assert(self.file_exists);
- if (self.file_open) {
- self.file.close();
- self.file_open = false;
+pub fn renameIntoPlace(af: *AtomicFile) RenameIntoPlaceError!void {
+ assert(af.file_exists);
+ if (af.file_open) {
+ af.file_writer.file.close();
+ af.file_open = false;
}
- try posix.renameat(self.dir.fd, self.tmp_path_buf[0..], self.dir.fd, self.dest_basename);
- self.file_exists = false;
+ const tmp_sub_path = std.fmt.hex(af.random_integer);
+ try posix.renameat(af.dir.fd, &tmp_sub_path, af.dir.fd, af.dest_basename);
+ af.file_exists = false;
}
-const AtomicFile = @This();
-const std = @import("../std.zig");
-const File = std.fs.File;
-const Dir = std.fs.Dir;
-const fs = std.fs;
-const assert = std.debug.assert;
-const posix = std.posix;
+pub const FinishError = FlushError || RenameIntoPlaceError;
+
+/// Combination of `flush` followed by `renameIntoPlace`.
+pub fn finish(af: *AtomicFile) FinishError!void {
+ try af.flush();
+ try af.renameIntoPlace();
+}
diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig
index 27d97a00cb..16418d216f 100644
--- a/lib/std/fs/Dir.zig
+++ b/lib/std/fs/Dir.zig
@@ -1,3 +1,20 @@
+const Dir = @This();
+const builtin = @import("builtin");
+const std = @import("../std.zig");
+const File = std.fs.File;
+const AtomicFile = std.fs.AtomicFile;
+const base64_encoder = fs.base64_encoder;
+const posix = std.posix;
+const mem = std.mem;
+const path = fs.path;
+const fs = std.fs;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const linux = std.os.linux;
+const windows = std.os.windows;
+const native_os = builtin.os.tag;
+const have_flock = @TypeOf(posix.system.flock) != void;
+
fd: Handle,
pub const Handle = posix.fd_t;
@@ -1862,9 +1879,10 @@ pub fn symLinkW(
/// Same as `symLink`, except tries to create the symbolic link until it
/// succeeds or encounters an error other than `error.PathAlreadyExists`.
-/// On Windows, both paths should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
+///
+/// * On Windows, both paths should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
+/// * On WASI, both paths should be encoded as valid UTF-8.
+/// * On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
pub fn atomicSymLink(
dir: Dir,
target_path: []const u8,
@@ -1880,9 +1898,8 @@ pub fn atomicSymLink(
const dirname = path.dirname(sym_link_path) orelse ".";
- var rand_buf: [AtomicFile.random_bytes_len]u8 = undefined;
-
- const temp_path_len = dirname.len + 1 + base64_encoder.calcSize(rand_buf.len);
+ const rand_len = @sizeOf(u64) * 2;
+ const temp_path_len = dirname.len + 1 + rand_len;
var temp_path_buf: [fs.max_path_bytes]u8 = undefined;
if (temp_path_len > temp_path_buf.len) return error.NameTooLong;
@@ -1892,8 +1909,8 @@ pub fn atomicSymLink(
const temp_path = temp_path_buf[0..temp_path_len];
while (true) {
- crypto.random.bytes(rand_buf[0..]);
- _ = base64_encoder.encode(temp_path[dirname.len + 1 ..], rand_buf[0..]);
+ const random_integer = std.crypto.random.int(u64);
+ temp_path[dirname.len + 1 ..][0..rand_len].* = std.fmt.hex(random_integer);
if (dir.symLink(target_path, temp_path, flags)) {
return dir.rename(temp_path, sym_link_path);
@@ -2552,25 +2569,42 @@ pub fn updateFile(
try dest_dir.makePath(dirname);
}
- var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = actual_mode });
+ var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available.
+ var atomic_file = try dest_dir.atomicFile(dest_path, .{
+ .mode = actual_mode,
+ .write_buffer = &buffer,
+ });
defer atomic_file.deinit();
- try atomic_file.file.writeFileAll(src_file, .{ .in_len = src_stat.size });
- try atomic_file.file.updateTimes(src_stat.atime, src_stat.mtime);
+ var src_reader: File.Reader = .initSize(src_file, &.{}, src_stat.size);
+ const dest_writer = &atomic_file.file_writer.interface;
+
+ _ = dest_writer.sendFileAll(&src_reader, .unlimited) catch |err| switch (err) {
+ error.ReadFailed => return src_reader.err.?,
+ error.WriteFailed => return atomic_file.file_writer.err.?,
+ };
+ try atomic_file.file_writer.file.updateTimes(src_stat.atime, src_stat.mtime);
try atomic_file.finish();
- return PrevStatus.stale;
+ return .stale;
}
pub const CopyFileError = File.OpenError || File.StatError ||
- AtomicFile.InitError || CopyFileRawError || AtomicFile.FinishError;
+ AtomicFile.InitError || AtomicFile.FinishError ||
+ File.ReadError || File.WriteError;
-/// Guaranteed to be atomic.
-/// On Linux, until https://patchwork.kernel.org/patch/9636735/ is merged and readily available,
-/// there is a possibility of power loss or application termination leaving temporary files present
-/// in the same directory as dest_path.
-/// On Windows, both paths should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
+/// Atomically creates a new file at `dest_path` within `dest_dir` with the
+/// same contents as `source_path` within `source_dir`, overwriting any already
+/// existing file.
+///
+/// On Linux, until https://patchwork.kernel.org/patch/9636735/ is merged and
+/// readily available, there is a possibility of power loss or application
+/// termination leaving temporary files present in the same directory as
+/// dest_path.
+///
+/// On Windows, both paths should be encoded as
+/// [WTF-8](https://simonsapin.github.io/wtf-8/). On WASI, both paths should be
+/// encoded as valid UTF-8. On other platforms, both paths are an opaque
+/// sequence of bytes with no particular encoding.
pub fn copyFile(
source_dir: Dir,
source_path: []const u8,
@@ -2578,79 +2612,34 @@ pub fn copyFile(
dest_path: []const u8,
options: CopyFileOptions,
) CopyFileError!void {
- var in_file = try source_dir.openFile(source_path, .{});
- defer in_file.close();
+ var file_reader: File.Reader = .init(try source_dir.openFile(source_path, .{}), &.{});
+ defer file_reader.file.close();
- var size: ?u64 = null;
const mode = options.override_mode orelse blk: {
- const st = try in_file.stat();
- size = st.size;
+ const st = try file_reader.file.stat();
+ file_reader.size = st.size;
break :blk st.mode;
};
- var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = mode });
+ var buffer: [1024]u8 = undefined; // Used only when direct fd-to-fd is not available.
+ var atomic_file = try dest_dir.atomicFile(dest_path, .{
+ .mode = mode,
+ .write_buffer = &buffer,
+ });
defer atomic_file.deinit();
- try copy_file(in_file.handle, atomic_file.file.handle, size);
- try atomic_file.finish();
-}
-
-const CopyFileRawError = error{SystemResources} || posix.CopyFileRangeError || posix.SendFileError;
-
-// Transfer all the data between two file descriptors in the most efficient way.
-// The copy starts at offset 0, the initial offsets are preserved.
-// No metadata is transferred over.
-fn copy_file(fd_in: posix.fd_t, fd_out: posix.fd_t, maybe_size: ?u64) CopyFileRawError!void {
- if (builtin.target.os.tag.isDarwin()) {
- const rc = posix.system.fcopyfile(fd_in, fd_out, null, .{ .DATA = true });
- switch (posix.errno(rc)) {
- .SUCCESS => return,
- .INVAL => unreachable,
- .NOMEM => return error.SystemResources,
- // The source file is not a directory, symbolic link, or regular file.
- // Try with the fallback path before giving up.
- .OPNOTSUPP => {},
- else => |err| return posix.unexpectedErrno(err),
- }
- }
-
- if (native_os == .linux) {
- // Try copy_file_range first as that works at the FS level and is the
- // most efficient method (if available).
- var offset: u64 = 0;
- cfr_loop: while (true) {
- // The kernel checks the u64 value `offset+count` for overflow, use
- // a 32 bit value so that the syscall won't return EINVAL except for
- // impossibly large files (> 2^64-1 - 2^32-1).
- const amt = try posix.copy_file_range(fd_in, offset, fd_out, offset, std.math.maxInt(u32), 0);
- // Terminate as soon as we have copied size bytes or no bytes
- if (maybe_size) |s| {
- if (s == amt) break :cfr_loop;
- }
- if (amt == 0) break :cfr_loop;
- offset += amt;
- }
- return;
- }
+ _ = atomic_file.file_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
+ error.ReadFailed => return file_reader.err.?,
+ error.WriteFailed => return atomic_file.file_writer.err.?,
+ };
- // Sendfile is a zero-copy mechanism iff the OS supports it, otherwise the
- // fallback code will copy the contents chunk by chunk.
- const empty_iovec = [0]posix.iovec_const{};
- var offset: u64 = 0;
- sendfile_loop: while (true) {
- const amt = try posix.sendfile(fd_out, fd_in, offset, 0, &empty_iovec, &empty_iovec, 0);
- // Terminate as soon as we have copied size bytes or no bytes
- if (maybe_size) |s| {
- if (s == amt) break :sendfile_loop;
- }
- if (amt == 0) break :sendfile_loop;
- offset += amt;
- }
+ try atomic_file.finish();
}
pub const AtomicFileOptions = struct {
mode: File.Mode = File.default_mode,
make_path: bool = false,
+ write_buffer: []u8,
};
/// Directly access the `.file` field, and then call `AtomicFile.finish` to
@@ -2668,9 +2657,9 @@ pub fn atomicFile(self: Dir, dest_path: []const u8, options: AtomicFileOptions)
else
try self.openDir(dirname, .{});
- return AtomicFile.init(fs.path.basename(dest_path), options.mode, dir, true);
+ return .init(fs.path.basename(dest_path), options.mode, dir, true, options.write_buffer);
} else {
- return AtomicFile.init(dest_path, options.mode, self, false);
+ return .init(dest_path, options.mode, self, false, options.write_buffer);
}
}
@@ -2768,30 +2757,3 @@ pub fn setPermissions(self: Dir, permissions: Permissions) SetPermissionsError!v
const file: File = .{ .handle = self.fd };
try file.setPermissions(permissions);
}
-
-const Metadata = File.Metadata;
-pub const MetadataError = File.MetadataError;
-
-/// Returns a `Metadata` struct, representing the permissions on the directory
-pub fn metadata(self: Dir) MetadataError!Metadata {
- const file: File = .{ .handle = self.fd };
- return try file.metadata();
-}
-
-const Dir = @This();
-const builtin = @import("builtin");
-const std = @import("../std.zig");
-const File = std.fs.File;
-const AtomicFile = std.fs.AtomicFile;
-const base64_encoder = fs.base64_encoder;
-const crypto = std.crypto;
-const posix = std.posix;
-const mem = std.mem;
-const path = fs.path;
-const fs = std.fs;
-const Allocator = std.mem.Allocator;
-const assert = std.debug.assert;
-const linux = std.os.linux;
-const windows = std.os.windows;
-const native_os = builtin.os.tag;
-const have_flock = @TypeOf(posix.system.flock) != void;
diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig
index 5b7e0aa570..fd965babfc 100644
--- a/lib/std/fs/File.zig
+++ b/lib/std/fs/File.zig
@@ -1089,113 +1089,6 @@ pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: u
return total_bytes_copied;
}
-/// Deprecated in favor of `Writer`.
-pub const WriteFileOptions = struct {
- in_offset: u64 = 0,
- in_len: ?u64 = null,
- headers_and_trailers: []posix.iovec_const = &[0]posix.iovec_const{},
- header_count: usize = 0,
-};
-
-/// Deprecated in favor of `Writer`.
-pub const WriteFileError = ReadError || error{EndOfStream} || WriteError;
-
-/// Deprecated in favor of `Writer`.
-pub fn writeFileAll(self: File, in_file: File, args: WriteFileOptions) WriteFileError!void {
- return self.writeFileAllSendfile(in_file, args) catch |err| switch (err) {
- error.Unseekable,
- error.FastOpenAlreadyInProgress,
- error.MessageTooBig,
- error.FileDescriptorNotASocket,
- error.NetworkUnreachable,
- error.NetworkSubsystemFailed,
- error.ConnectionRefused,
- => return self.writeFileAllUnseekable(in_file, args),
- else => |e| return e,
- };
-}
-
-/// Deprecated in favor of `Writer`.
-pub fn writeFileAllUnseekable(self: File, in_file: File, args: WriteFileOptions) WriteFileError!void {
- const headers = args.headers_and_trailers[0..args.header_count];
- const trailers = args.headers_and_trailers[args.header_count..];
- try self.writevAll(headers);
- try in_file.deprecatedReader().skipBytes(args.in_offset, .{ .buf_size = 4096 });
- var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
- if (args.in_len) |len| {
- var stream = std.io.limitedReader(in_file.deprecatedReader(), len);
- try fifo.pump(stream.reader(), self.deprecatedWriter());
- } else {
- try fifo.pump(in_file.deprecatedReader(), self.deprecatedWriter());
- }
- try self.writevAll(trailers);
-}
-
-/// Deprecated in favor of `Writer`.
-fn writeFileAllSendfile(self: File, in_file: File, args: WriteFileOptions) posix.SendFileError!void {
- const count = blk: {
- if (args.in_len) |l| {
- if (l == 0) {
- return self.writevAll(args.headers_and_trailers);
- } else {
- break :blk l;
- }
- } else {
- break :blk 0;
- }
- };
- const headers = args.headers_and_trailers[0..args.header_count];
- const trailers = args.headers_and_trailers[args.header_count..];
- const zero_iovec = &[0]posix.iovec_const{};
- // When reading the whole file, we cannot put the trailers in the sendfile() syscall,
- // because we have no way to determine whether a partial write is past the end of the file or not.
- const trls = if (count == 0) zero_iovec else trailers;
- const offset = args.in_offset;
- const out_fd = self.handle;
- const in_fd = in_file.handle;
- const flags = 0;
- var amt: usize = 0;
- hdrs: {
- var i: usize = 0;
- while (i < headers.len) {
- amt = try posix.sendfile(out_fd, in_fd, offset, count, headers[i..], trls, flags);
- while (amt >= headers[i].len) {
- amt -= headers[i].len;
- i += 1;
- if (i >= headers.len) break :hdrs;
- }
- headers[i].base += amt;
- headers[i].len -= amt;
- }
- }
- if (count == 0) {
- var off: u64 = amt;
- while (true) {
- amt = try posix.sendfile(out_fd, in_fd, offset + off, 0, zero_iovec, zero_iovec, flags);
- if (amt == 0) break;
- off += amt;
- }
- } else {
- var off: u64 = amt;
- while (off < count) {
- amt = try posix.sendfile(out_fd, in_fd, offset + off, count - off, zero_iovec, trailers, flags);
- off += amt;
- }
- amt = @as(usize, @intCast(off - count));
- }
- var i: usize = 0;
- while (i < trailers.len) {
- while (amt >= trailers[i].len) {
- amt -= trailers[i].len;
- i += 1;
- if (i >= trailers.len) return;
- }
- trailers[i].base += amt;
- trailers[i].len -= amt;
- amt = try posix.writev(self.handle, trailers[i..]);
- }
-}
-
/// Deprecated in favor of `Reader`.
pub const DeprecatedReader = io.GenericReader(File, ReadError, read);
@@ -1242,7 +1135,7 @@ pub const Reader = struct {
err: ?ReadError = null,
mode: Reader.Mode = .positional,
/// Tracks the true seek position in the file. To obtain the logical
- /// position, subtract the buffer size from this value.
+ /// position, use `logicalPos`.
pos: u64 = 0,
size: ?u64 = null,
size_err: ?GetEndPosError = null,
@@ -1335,14 +1228,12 @@ pub const Reader = struct {
pub fn seekBy(r: *Reader, offset: i64) Reader.SeekError!void {
switch (r.mode) {
.positional, .positional_reading => {
- // TODO: make += operator allow any integer types
- r.pos = @intCast(@as(i64, @intCast(r.pos)) + offset);
+ setPosAdjustingBuffer(r, @intCast(@as(i64, @intCast(r.pos)) + offset));
},
.streaming, .streaming_reading => {
const seek_err = r.seek_err orelse e: {
if (posix.lseek_CUR(r.file.handle, offset)) |_| {
- // TODO: make += operator allow any integer types
- r.pos = @intCast(@as(i64, @intCast(r.pos)) + offset);
+ setPosAdjustingBuffer(r, @intCast(@as(i64, @intCast(r.pos)) + offset));
return;
} else |err| {
r.seek_err = err;
@@ -1358,6 +1249,8 @@ pub const Reader = struct {
r.pos += n;
remaining -= n;
}
+ r.interface.seek = 0;
+ r.interface.end = 0;
},
.failure => return r.seek_err.?,
}
@@ -1366,7 +1259,7 @@ pub const Reader = struct {
pub fn seekTo(r: *Reader, offset: u64) Reader.SeekError!void {
switch (r.mode) {
.positional, .positional_reading => {
- r.pos = offset;
+ setPosAdjustingBuffer(r, offset);
},
.streaming, .streaming_reading => {
if (offset >= r.pos) return Reader.seekBy(r, @intCast(offset - r.pos));
@@ -1375,12 +1268,28 @@ pub const Reader = struct {
r.seek_err = err;
return err;
};
- r.pos = offset;
+ setPosAdjustingBuffer(r, offset);
},
.failure => return r.seek_err.?,
}
}
+ pub fn logicalPos(r: *const Reader) u64 {
+ return r.pos - r.interface.bufferedLen();
+ }
+
+ fn setPosAdjustingBuffer(r: *Reader, offset: u64) void {
+ const logical_pos = logicalPos(r);
+ if (offset < logical_pos or offset >= r.pos) {
+ r.interface.seek = 0;
+ r.interface.end = 0;
+ r.pos = offset;
+ } else {
+ const logical_delta: usize = @intCast(offset - logical_pos);
+ r.interface.seek += logical_delta;
+ }
+ }
+
/// Number of slices to store on the stack, when trying to send as many byte
/// vectors through the underlying read calls as possible.
const max_buffers_len = 16;
@@ -1526,7 +1435,7 @@ pub const Reader = struct {
}
return 0;
};
- const n = @min(size - pos, std.math.maxInt(i64), @intFromEnum(limit));
+ const n = @min(size - pos, maxInt(i64), @intFromEnum(limit));
file.seekBy(n) catch |err| {
r.seek_err = err;
return 0;
@@ -1645,7 +1554,10 @@ pub const Writer = struct {
return .{
.vtable = &.{
.drain = drain,
- .sendFile = sendFile,
+ .sendFile = switch (builtin.zig_backend) {
+ else => sendFile,
+ .stage2_aarch64 => std.io.Writer.unimplementedSendFile,
+ },
},
.buffer = buffer,
};
@@ -1715,7 +1627,6 @@ pub const Writer = struct {
const pattern = data[data.len - 1];
if (pattern.len == 0 or splat == 0) return 0;
const n = windows.WriteFile(handle, pattern, null) catch |err| {
- std.debug.print("windows write file failed3: {t}\n", .{err});
w.err = err;
return error.WriteFailed;
};
@@ -1817,18 +1728,141 @@ pub const Writer = struct {
file_reader: *Reader,
limit: std.io.Limit,
) std.io.Writer.FileError!usize {
+ const reader_buffered = file_reader.interface.buffered();
+ if (reader_buffered.len >= @intFromEnum(limit))
+ return sendFileBuffered(io_w, file_reader, reader_buffered);
+ const writer_buffered = io_w.buffered();
+ const file_limit = @intFromEnum(limit) - reader_buffered.len;
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
const out_fd = w.file.handle;
const in_fd = file_reader.file.handle;
- // TODO try using copy_file_range on FreeBSD
- // TODO try using sendfile on macOS
- // TODO try using sendfile on FreeBSD
+
+ if (file_reader.size) |size| {
+ if (size - file_reader.pos == 0) {
+ if (reader_buffered.len != 0) {
+ return sendFileBuffered(io_w, file_reader, reader_buffered);
+ } else {
+ return error.EndOfStream;
+ }
+ }
+ }
+
+ if (native_os == .freebsd and w.mode == .streaming) sf: {
+ // Try using sendfile on FreeBSD.
+ if (w.sendfile_err != null) break :sf;
+ const offset = std.math.cast(std.c.off_t, file_reader.pos) orelse break :sf;
+ var hdtr_data: std.c.sf_hdtr = undefined;
+ var headers: [2]posix.iovec_const = undefined;
+ var headers_i: u8 = 0;
+ if (writer_buffered.len != 0) {
+ headers[headers_i] = .{ .base = writer_buffered.ptr, .len = writer_buffered.len };
+ headers_i += 1;
+ }
+ if (reader_buffered.len != 0) {
+ headers[headers_i] = .{ .base = reader_buffered.ptr, .len = reader_buffered.len };
+ headers_i += 1;
+ }
+ const hdtr: ?*std.c.sf_hdtr = if (headers_i == 0) null else b: {
+ hdtr_data = .{
+ .headers = &headers,
+ .hdr_cnt = headers_i,
+ .trailers = null,
+ .trl_cnt = 0,
+ };
+ break :b &hdtr_data;
+ };
+ var sbytes: std.c.off_t = undefined;
+ const nbytes: usize = @min(file_limit, maxInt(usize));
+ const flags = 0;
+ switch (posix.errno(std.c.sendfile(in_fd, out_fd, offset, nbytes, hdtr, &sbytes, flags))) {
+ .SUCCESS, .INTR => {},
+ .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => w.sendfile_err = error.UnsupportedOperation,
+ .BADF => if (builtin.mode == .Debug) @panic("race condition") else {
+ w.sendfile_err = error.Unexpected;
+ },
+ .FAULT => if (builtin.mode == .Debug) @panic("segmentation fault") else {
+ w.sendfile_err = error.Unexpected;
+ },
+ .NOTCONN => w.sendfile_err = error.BrokenPipe,
+ .AGAIN, .BUSY => if (sbytes == 0) {
+ w.sendfile_err = error.WouldBlock;
+ },
+ .IO => w.sendfile_err = error.InputOutput,
+ .PIPE => w.sendfile_err = error.BrokenPipe,
+ .NOBUFS => w.sendfile_err = error.SystemResources,
+ else => |err| w.sendfile_err = posix.unexpectedErrno(err),
+ }
+ if (sbytes == 0) {
+ file_reader.size = file_reader.pos;
+ return error.EndOfStream;
+ }
+ const consumed = io_w.consume(@intCast(sbytes));
+ file_reader.seekTo(file_reader.pos + consumed) catch return error.ReadFailed;
+ return consumed;
+ }
+
+ if (native_os.isDarwin() and w.mode == .streaming) sf: {
+ // Try using sendfile on macOS.
+ if (w.sendfile_err != null) break :sf;
+ const offset = std.math.cast(std.c.off_t, file_reader.pos) orelse break :sf;
+ var hdtr_data: std.c.sf_hdtr = undefined;
+ var headers: [2]posix.iovec_const = undefined;
+ var headers_i: u8 = 0;
+ if (writer_buffered.len != 0) {
+ headers[headers_i] = .{ .base = writer_buffered.ptr, .len = writer_buffered.len };
+ headers_i += 1;
+ }
+ if (reader_buffered.len != 0) {
+ headers[headers_i] = .{ .base = reader_buffered.ptr, .len = reader_buffered.len };
+ headers_i += 1;
+ }
+ const hdtr: ?*std.c.sf_hdtr = if (headers_i == 0) null else b: {
+ hdtr_data = .{
+ .headers = &headers,
+ .hdr_cnt = headers_i,
+ .trailers = null,
+ .trl_cnt = 0,
+ };
+ break :b &hdtr_data;
+ };
+ const max_count = maxInt(i32); // Avoid EINVAL.
+ var len: std.c.off_t = @min(file_limit, max_count);
+ const flags = 0;
+ switch (posix.errno(std.c.sendfile(in_fd, out_fd, offset, &len, hdtr, flags))) {
+ .SUCCESS, .INTR => {},
+ .OPNOTSUPP, .NOTSOCK, .NOSYS => w.sendfile_err = error.UnsupportedOperation,
+ .BADF => if (builtin.mode == .Debug) @panic("race condition") else {
+ w.sendfile_err = error.Unexpected;
+ },
+ .FAULT => if (builtin.mode == .Debug) @panic("segmentation fault") else {
+ w.sendfile_err = error.Unexpected;
+ },
+ .INVAL => if (builtin.mode == .Debug) @panic("invalid API usage") else {
+ w.sendfile_err = error.Unexpected;
+ },
+ .NOTCONN => w.sendfile_err = error.BrokenPipe,
+ .AGAIN => if (len == 0) {
+ w.sendfile_err = error.WouldBlock;
+ },
+ .IO => w.sendfile_err = error.InputOutput,
+ .PIPE => w.sendfile_err = error.BrokenPipe,
+ else => |err| w.sendfile_err = posix.unexpectedErrno(err),
+ }
+ if (len == 0) {
+ file_reader.size = file_reader.pos;
+ return error.EndOfStream;
+ }
+ const consumed = io_w.consume(@bitCast(len));
+ file_reader.seekTo(file_reader.pos + consumed) catch return error.ReadFailed;
+ return consumed;
+ }
+
if (native_os == .linux and w.mode == .streaming) sf: {
// Try using sendfile on Linux.
if (w.sendfile_err != null) break :sf;
// Linux sendfile does not support headers.
- const buffered = limit.slice(file_reader.interface.buffer);
- if (io_w.end != 0 or buffered.len != 0) return drain(io_w, &.{buffered}, 1);
+ if (writer_buffered.len != 0 or reader_buffered.len != 0)
+ return sendFileBuffered(io_w, file_reader, reader_buffered);
const max_count = 0x7ffff000; // Avoid EINVAL.
var off: std.os.linux.off_t = undefined;
const off_ptr: ?*std.os.linux.off_t, const count: usize = switch (file_reader.mode) {
@@ -1875,6 +1909,7 @@ pub const Writer = struct {
w.pos += n;
return n;
}
+
const copy_file_range = switch (native_os) {
.freebsd => std.os.freebsd.copy_file_range,
.linux => if (std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 })) std.os.linux.wrapped.copy_file_range else {},
@@ -1882,8 +1917,8 @@ pub const Writer = struct {
};
if (@TypeOf(copy_file_range) != void) cfr: {
if (w.copy_file_range_err != null) break :cfr;
- const buffered = limit.slice(file_reader.interface.buffer);
- if (io_w.end != 0 or buffered.len != 0) return drain(io_w, &.{buffered}, 1);
+ if (writer_buffered.len != 0 or reader_buffered.len != 0)
+ return sendFileBuffered(io_w, file_reader, reader_buffered);
var off_in: i64 = undefined;
var off_out: i64 = undefined;
const off_in_ptr: ?*i64 = switch (file_reader.mode) {
@@ -1922,6 +1957,9 @@ pub const Writer = struct {
if (file_reader.pos != 0) break :fcf;
if (w.pos != 0) break :fcf;
if (limit != .unlimited) break :fcf;
+ const size = file_reader.getSize() catch break :fcf;
+ if (writer_buffered.len != 0 or reader_buffered.len != 0)
+ return sendFileBuffered(io_w, file_reader, reader_buffered);
const rc = std.c.fcopyfile(in_fd, out_fd, null, .{ .DATA = true });
switch (posix.errno(rc)) {
.SUCCESS => {},
@@ -1942,15 +1980,24 @@ pub const Writer = struct {
return 0;
},
}
- const n = if (file_reader.size) |size| size else @panic("TODO figure out how much copied");
- file_reader.pos = n;
- w.pos = n;
- return n;
+ file_reader.pos = size;
+ w.pos = size;
+ return size;
}
return error.Unimplemented;
}
+ fn sendFileBuffered(
+ io_w: *std.io.Writer,
+ file_reader: *Reader,
+ reader_buffered: []const u8,
+ ) std.io.Writer.FileError!usize {
+ const n = try drain(io_w, &.{reader_buffered}, 1);
+ file_reader.seekTo(file_reader.pos + n) catch return error.ReadFailed;
+ return n;
+ }
+
pub fn seekTo(w: *Writer, offset: u64) SeekError!void {
switch (w.mode) {
.positional, .positional_reading => {
@@ -1979,7 +2026,19 @@ pub const Writer = struct {
/// along with other write failures.
pub fn end(w: *Writer) EndError!void {
try w.interface.flush();
- return w.file.setEndPos(w.pos);
+ switch (w.mode) {
+ .positional,
+ .positional_reading,
+ => w.file.setEndPos(w.pos) catch |err| switch (err) {
+ error.NonResizable => return,
+ else => |e| return e,
+ },
+
+ .streaming,
+ .streaming_reading,
+ .failure,
+ => {},
+ }
}
};
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 50cbccf270..4b63873af5 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -1499,32 +1499,18 @@ test "sendfile" {
const header2 = "second header\n";
const trailer1 = "trailer1\n";
const trailer2 = "second trailer\n";
- var hdtr = [_]posix.iovec_const{
- .{
- .base = header1,
- .len = header1.len,
- },
- .{
- .base = header2,
- .len = header2.len,
- },
- .{
- .base = trailer1,
- .len = trailer1.len,
- },
- .{
- .base = trailer2,
- .len = trailer2.len,
- },
- };
+ var headers: [2][]const u8 = .{ header1, header2 };
+ var trailers: [2][]const u8 = .{ trailer1, trailer2 };
var written_buf: [100]u8 = undefined;
- try dest_file.writeFileAll(src_file, .{
- .in_offset = 1,
- .in_len = 10,
- .headers_and_trailers = &hdtr,
- .header_count = 2,
- });
+ var file_reader = src_file.reader(&.{});
+ var fallback_buffer: [50]u8 = undefined;
+ var file_writer = dest_file.writer(&fallback_buffer);
+ try file_writer.interface.writeVecAll(&headers);
+ try file_reader.seekTo(1);
+ try testing.expectEqual(10, try file_writer.interface.sendFileAll(&file_reader, .limited(10)));
+ try file_writer.interface.writeVecAll(&trailers);
+ try file_writer.interface.flush();
const amt = try dest_file.preadAll(&written_buf, 0);
try testing.expectEqualStrings("header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n", written_buf[0..amt]);
}
@@ -1595,9 +1581,10 @@ test "AtomicFile" {
;
{
- var af = try ctx.dir.atomicFile(test_out_file, .{});
+ var buffer: [100]u8 = undefined;
+ var af = try ctx.dir.atomicFile(test_out_file, .{ .write_buffer = &buffer });
defer af.deinit();
- try af.file.writeAll(test_content);
+ try af.file_writer.interface.writeAll(test_content);
try af.finish();
}
const content = try ctx.dir.readFileAlloc(allocator, test_out_file, 9999);
@@ -2073,7 +2060,7 @@ test "invalid UTF-8/WTF-8 paths" {
}
test "read file non vectored" {
- var tmp_dir = std.testing.tmpDir(.{});
+ var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
const contents = "hello, world!\n";
@@ -2098,6 +2085,47 @@ test "read file non vectored" {
else => |e| return e,
};
}
- try std.testing.expectEqualStrings(contents, w.buffered());
- try std.testing.expectEqual(contents.len, i);
+ try testing.expectEqualStrings(contents, w.buffered());
+ try testing.expectEqual(contents.len, i);
+}
+
+test "seek keeping partial buffer" {
+ var tmp_dir = testing.tmpDir(.{});
+ defer tmp_dir.cleanup();
+
+ const contents = "0123456789";
+
+ const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
+ defer file.close();
+ {
+ var file_writer: std.fs.File.Writer = .init(file, &.{});
+ try file_writer.interface.writeAll(contents);
+ try file_writer.interface.flush();
+ }
+
+ var read_buffer: [3]u8 = undefined;
+ var file_reader: std.fs.File.Reader = .init(file, &read_buffer);
+
+ try testing.expectEqual(0, file_reader.logicalPos());
+
+ var buf: [4]u8 = undefined;
+ try file_reader.interface.readSliceAll(&buf);
+
+ if (file_reader.interface.bufferedLen() != 3) {
+ // Pass the test if the OS doesn't give us vectored reads.
+ return;
+ }
+
+ try testing.expectEqual(4, file_reader.logicalPos());
+ try testing.expectEqual(7, file_reader.pos);
+ try file_reader.seekTo(6);
+ try testing.expectEqual(6, file_reader.logicalPos());
+ try testing.expectEqual(7, file_reader.pos);
+
+ try testing.expectEqualStrings("0123", &buf);
+
+ const n = try file_reader.interface.readSliceShort(&buf);
+ try testing.expectEqual(4, n);
+
+ try testing.expectEqualStrings("6789", &buf);
}
diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig
index 25d3e44253..886aed72dc 100644
--- a/lib/std/http/Server.zig
+++ b/lib/std/http/Server.zig
@@ -129,11 +129,10 @@ pub const Request = struct {
pub const Compression = union(enum) {
pub const DeflateDecompressor = std.compress.zlib.Decompressor(std.io.AnyReader);
pub const GzipDecompressor = std.compress.gzip.Decompressor(std.io.AnyReader);
- pub const ZstdDecompressor = std.compress.zstd.Decompressor(std.io.AnyReader);
deflate: DeflateDecompressor,
gzip: GzipDecompressor,
- zstd: ZstdDecompressor,
+ zstd: std.compress.zstd.Decompress,
none: void,
};
diff --git a/lib/std/json.zig b/lib/std/json.zig
index f81ac1cd65..c7b7dcf19f 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -69,7 +69,6 @@ pub const ArrayHashMap = @import("json/hashmap.zig").ArrayHashMap;
pub const Scanner = @import("json/Scanner.zig");
pub const validate = Scanner.validate;
pub const Error = Scanner.Error;
-pub const reader = Scanner.reader;
pub const default_buffer_size = Scanner.default_buffer_size;
pub const Token = Scanner.Token;
pub const TokenType = Scanner.TokenType;
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 1cd9a83a14..9f2d12a65e 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -45,6 +45,7 @@ pub const rad_per_deg = 0.017453292519943295769236907684886127134428718885417254
/// 180.0/pi
pub const deg_per_rad = 57.295779513082320876798154814105170332405472466564321549160243861;
+pub const Sign = enum(u1) { positive, negative };
pub const FloatRepr = float.FloatRepr;
pub const floatExponentBits = float.floatExponentBits;
pub const floatMantissaBits = float.floatMantissaBits;
@@ -594,27 +595,30 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// Shifts left. Overflowed bits are truncated.
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
+ const is_shl = shift_amt >= 0;
const abs_shift_amt = @abs(shift_amt);
-
- const casted_shift_amt = blk: {
- if (@typeInfo(T) == .vector) {
- const C = @typeInfo(T).vector.child;
- const len = @typeInfo(T).vector.len;
- if (abs_shift_amt >= @typeInfo(C).int.bits) return @splat(0);
- break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
- } else {
- if (abs_shift_amt >= @typeInfo(T).int.bits) return 0;
- break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
- }
+ const casted_shift_amt = casted_shift_amt: switch (@typeInfo(T)) {
+ .int => |info| {
+ if (abs_shift_amt < info.bits) break :casted_shift_amt @as(
+ Log2Int(T),
+ @intCast(abs_shift_amt),
+ );
+ if (info.signedness == .unsigned or is_shl) return 0;
+ return a >> (info.bits - 1);
+ },
+ .vector => |info| {
+ const Child = info.child;
+ const child_info = @typeInfo(Child).int;
+ if (abs_shift_amt < child_info.bits) break :casted_shift_amt @as(
+ @Vector(info.len, Log2Int(Child)),
+ @splat(@as(Log2Int(Child), @intCast(abs_shift_amt))),
+ );
+ if (child_info.signedness == .unsigned or is_shl) return @splat(0);
+ return a >> @splat(child_info.bits - 1);
+ },
+ else => comptime unreachable,
};
-
- if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).int.signedness == .signed) {
- if (shift_amt < 0) {
- return a >> casted_shift_amt;
- }
- }
-
- return a << casted_shift_amt;
+ return if (is_shl) a << casted_shift_amt else a >> casted_shift_amt;
}
test shl {
@@ -629,32 +633,40 @@ test shl {
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) << 1);
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) >> 1);
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, 33)[0] == 0);
+
+ try testing.expect(shl(i8, -1, -100) == -1);
+ try testing.expect(shl(i8, -1, 100) == 0);
+ try testing.expect(@reduce(.And, shl(@Vector(2, i8), .{ -1, 1 }, -100) == @Vector(2, i8){ -1, 0 }));
+ try testing.expect(@reduce(.And, shl(@Vector(2, i8), .{ -1, 1 }, 100) == @Vector(2, i8){ 0, 0 }));
}
/// Shifts right. Overflowed bits are truncated.
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
+ const is_shl = shift_amt < 0;
const abs_shift_amt = @abs(shift_amt);
-
- const casted_shift_amt = blk: {
- if (@typeInfo(T) == .vector) {
- const C = @typeInfo(T).vector.child;
- const len = @typeInfo(T).vector.len;
- if (abs_shift_amt >= @typeInfo(C).int.bits) return @splat(0);
- break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
- } else {
- if (abs_shift_amt >= @typeInfo(T).int.bits) return 0;
- break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
- }
+ const casted_shift_amt = casted_shift_amt: switch (@typeInfo(T)) {
+ .int => |info| {
+ if (abs_shift_amt < info.bits) break :casted_shift_amt @as(
+ Log2Int(T),
+ @intCast(abs_shift_amt),
+ );
+ if (info.signedness == .unsigned or is_shl) return 0;
+ return a >> (info.bits - 1);
+ },
+ .vector => |info| {
+ const Child = info.child;
+ const child_info = @typeInfo(Child).int;
+ if (abs_shift_amt < child_info.bits) break :casted_shift_amt @as(
+ @Vector(info.len, Log2Int(Child)),
+ @splat(@as(Log2Int(Child), @intCast(abs_shift_amt))),
+ );
+ if (child_info.signedness == .unsigned or is_shl) return @splat(0);
+ return a >> @splat(child_info.bits - 1);
+ },
+ else => comptime unreachable,
};
-
- if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).int.signedness == .signed) {
- if (shift_amt < 0) {
- return a << casted_shift_amt;
- }
- }
-
- return a >> casted_shift_amt;
+ return if (is_shl) a << casted_shift_amt else a >> casted_shift_amt;
}
test shr {
@@ -669,6 +681,11 @@ test shr {
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) >> 1);
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) << 1);
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, 33)[0] == 0);
+
+ try testing.expect(shr(i8, -1, -100) == 0);
+ try testing.expect(shr(i8, -1, 100) == -1);
+ try testing.expect(@reduce(.And, shr(@Vector(2, i8), .{ -1, 1 }, -100) == @Vector(2, i8){ 0, 0 }));
+ try testing.expect(@reduce(.And, shr(@Vector(2, i8), .{ -1, 1 }, 100) == @Vector(2, i8){ -1, 0 }));
}
/// Rotates right. Only unsigned values can be rotated. Negative shift
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index f44b254cf1..bb6deeb778 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -2774,7 +2774,6 @@ test "bitNotWrap more than two limbs" {
// This test requires int sizes greater than 128 bits.
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
// LLVM: unexpected runtime library name: __umodei4
if (builtin.zig_backend == .stage2_llvm and comptime builtin.target.cpu.arch.isWasm()) return error.SkipZigTest; // TODO
diff --git a/lib/std/math/float.zig b/lib/std/math/float.zig
index df7d7fe1ab..6ffbd85bd2 100644
--- a/lib/std/math/float.zig
+++ b/lib/std/math/float.zig
@@ -4,8 +4,6 @@ const assert = std.debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
-pub const Sign = enum(u1) { positive, negative };
-
pub fn FloatRepr(comptime Float: type) type {
const fractional_bits = floatFractionalBits(Float);
const exponent_bits = floatExponentBits(Float);
@@ -14,7 +12,7 @@ pub fn FloatRepr(comptime Float: type) type {
mantissa: StoredMantissa,
exponent: BiasedExponent,
- sign: Sign,
+ sign: std.math.Sign,
pub const StoredMantissa = @Type(.{ .int = .{
.signedness = .unsigned,
@@ -69,7 +67,7 @@ pub fn FloatRepr(comptime Float: type) type {
/// This currently truncates denormal values, which needs to be fixed before this can be used to
/// produce a rounded value.
- pub fn reconstruct(normalized: Normalized, sign: Sign) Float {
+ pub fn reconstruct(normalized: Normalized, sign: std.math.Sign) Float {
if (normalized.exponent > BiasedExponent.max_normal.unbias()) return @bitCast(Repr{
.mantissa = 0,
.exponent = .infinite,
diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig
index 655a42215e..9ac5c6da24 100644
--- a/lib/std/math/log10.zig
+++ b/lib/std/math/log10.zig
@@ -132,7 +132,6 @@ inline fn less_than_5(x: u32) u32 {
test log10_int {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and comptime builtin.target.cpu.arch.isWasm()) return error.SkipZigTest; // TODO
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 1a61076f32..3b72a2b579 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -676,6 +676,7 @@ test lessThan {
const eqlBytes_allowed = switch (builtin.zig_backend) {
// These backends don't support vectors yet.
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> false,
@@ -4482,7 +4483,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
);
asm volatile (""
:
- : [val2] "r" (val2),
+ : [_] "r" (val2),
);
} else doNotOptimizeAway(&val);
},
@@ -4490,7 +4491,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
if ((t.float.bits == 32 or t.float.bits == 64) and builtin.zig_backend != .stage2_c) {
asm volatile (""
:
- : [val] "rm" (val),
+ : [_] "rm" (val),
);
} else doNotOptimizeAway(&val);
},
@@ -4500,7 +4501,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
} else {
asm volatile (""
:
- : [val] "m" (val),
+ : [_] "m" (val),
: .{ .memory = true });
}
},
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 75494145b9..a02451c0fd 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -503,7 +503,6 @@ pub var elf_aux_maybe: ?[*]std.elf.Auxv = null;
/// Whether an external or internal getauxval implementation is used.
const extern_getauxval = switch (builtin.zig_backend) {
// Calling extern functions is not yet supported with these backends
- .stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,
diff --git a/lib/std/posix.zig b/lib/std/posix.zig
index e3e1657705..54c6470d2c 100644
--- a/lib/std/posix.zig
+++ b/lib/std/posix.zig
@@ -192,10 +192,27 @@ pub const iovec_const = extern struct {
len: usize,
};
-pub const ACCMODE = enum(u2) {
- RDONLY = 0,
- WRONLY = 1,
- RDWR = 2,
+pub const ACCMODE = switch (native_os) {
+ // POSIX has a note about the access mode values:
+ //
+ // In historical implementations the value of O_RDONLY is zero. Because of
+ // that, it is not possible to detect the presence of O_RDONLY and another
+ // option. Future implementations should encode O_RDONLY and O_WRONLY as
+ // bit flags so that: O_RDONLY | O_WRONLY == O_RDWR
+ //
+ // In practice SerenityOS is the only system supported by Zig that
+ // implements this suggestion.
+ // https://github.com/SerenityOS/serenity/blob/4adc51fdf6af7d50679c48b39362e062f5a3b2cb/Kernel/API/POSIX/fcntl.h#L28-L30
+ .serenity => enum(u2) {
+ RDONLY = 1,
+ WRONLY = 2,
+ RDWR = 3,
+ },
+ else => enum(u2) {
+ RDONLY = 0,
+ WRONLY = 1,
+ RDWR = 2,
+ },
};
pub const TCSA = enum(c_uint) {
@@ -1035,6 +1052,7 @@ pub const TruncateError = error{
FileBusy,
AccessDenied,
PermissionDenied,
+ NonResizable,
} || UnexpectedError;
/// Length must be positive when treated as an i64.
@@ -1074,7 +1092,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
.PERM => return error.PermissionDenied,
.TXTBSY => return error.FileBusy,
.BADF => unreachable, // Handle not open for writing
- .INVAL => unreachable, // Handle not open for writing, negative length, or non-resizable handle
+ .INVAL => return error.NonResizable,
.NOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
@@ -1090,7 +1108,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
.PERM => return error.PermissionDenied,
.TXTBSY => return error.FileBusy,
.BADF => unreachable, // Handle not open for writing
- .INVAL => unreachable, // Handle not open for writing, negative length, or non-resizable handle
+ .INVAL => return error.NonResizable, // This is returned for /dev/null for example.
else => |err| return unexpectedErrno(err),
}
}
@@ -6326,295 +6344,6 @@ pub fn send(
};
}
-pub const SendFileError = PReadError || WriteError || SendError;
-
-/// Transfer data between file descriptors, with optional headers and trailers.
-///
-/// Returns the number of bytes written, which can be zero.
-///
-/// The `sendfile` call copies `in_len` bytes from one file descriptor to another. When possible,
-/// this is done within the operating system kernel, which can provide better performance
-/// characteristics than transferring data from kernel to user space and back, such as with
-/// `read` and `write` calls. When `in_len` is `0`, it means to copy until the end of the input file has been
-/// reached. Note, however, that partial writes are still possible in this case.
-///
-/// `in_fd` must be a file descriptor opened for reading, and `out_fd` must be a file descriptor
-/// opened for writing. They may be any kind of file descriptor; however, if `in_fd` is not a regular
-/// file system file, it may cause this function to fall back to calling `read` and `write`, in which case
-/// atomicity guarantees no longer apply.
-///
-/// Copying begins reading at `in_offset`. The input file descriptor seek position is ignored and not updated.
-/// If the output file descriptor has a seek position, it is updated as bytes are written. When
-/// `in_offset` is past the end of the input file, it successfully reads 0 bytes.
-///
-/// `flags` has different meanings per operating system; refer to the respective man pages.
-///
-/// These systems support atomically sending everything, including headers and trailers:
-/// * macOS
-/// * FreeBSD
-///
-/// These systems support in-kernel data copying, but headers and trailers are not sent atomically:
-/// * Linux
-///
-/// Other systems fall back to calling `read` / `write`.
-///
-/// Linux has a limit on how many bytes may be transferred in one `sendfile` call, which is `0x7ffff000`
-/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
-/// well as stuffing the errno codes into the last `4096` values. This is noted on the `sendfile` man page.
-/// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL.
-/// The corresponding POSIX limit on this is `maxInt(isize)`.
-pub fn sendfile(
- out_fd: fd_t,
- in_fd: fd_t,
- in_offset: u64,
- in_len: u64,
- headers: []const iovec_const,
- trailers: []const iovec_const,
- flags: u32,
-) SendFileError!usize {
- var header_done = false;
- var total_written: usize = 0;
-
- // Prevents EOVERFLOW.
- const size_t = std.meta.Int(.unsigned, @typeInfo(usize).int.bits - 1);
- const max_count = switch (native_os) {
- .linux => 0x7ffff000,
- .macos, .ios, .watchos, .tvos, .visionos => maxInt(i32),
- else => maxInt(size_t),
- };
-
- switch (native_os) {
- .linux => sf: {
- if (headers.len != 0) {
- const amt = try writev(out_fd, headers);
- total_written += amt;
- if (amt < count_iovec_bytes(headers)) return total_written;
- header_done = true;
- }
-
- // Here we match BSD behavior, making a zero count value send as many bytes as possible.
- const adjusted_count = if (in_len == 0) max_count else @min(in_len, max_count);
-
- const sendfile_sym = if (lfs64_abi) system.sendfile64 else system.sendfile;
- while (true) {
- var offset: off_t = @bitCast(in_offset);
- const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count);
- switch (errno(rc)) {
- .SUCCESS => {
- const amt: usize = @bitCast(rc);
- total_written += amt;
- if (in_len == 0 and amt == 0) {
- // We have detected EOF from `in_fd`.
- break;
- } else if (amt < in_len) {
- return total_written;
- } else {
- break;
- }
- },
-
- .BADF => unreachable, // Always a race condition.
- .FAULT => unreachable, // Segmentation fault.
- .OVERFLOW => unreachable, // We avoid passing too large of a `count`.
- .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
-
- .INVAL => {
- // EINVAL could be any of the following situations:
- // * Descriptor is not valid or locked
- // * an mmap(2)-like operation is not available for in_fd
- // * count is negative
- // * out_fd has the APPEND flag set
- // Because of the "mmap(2)-like operation" possibility, we fall back to doing read/write
- // manually.
- break :sf;
- },
- .AGAIN => return error.WouldBlock,
- .IO => return error.InputOutput,
- .PIPE => return error.BrokenPipe,
- .NOMEM => return error.SystemResources,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- else => |err| {
- unexpectedErrno(err) catch {};
- break :sf;
- },
- }
- }
-
- if (trailers.len != 0) {
- total_written += try writev(out_fd, trailers);
- }
-
- return total_written;
- },
- .freebsd => sf: {
- var hdtr_data: std.c.sf_hdtr = undefined;
- var hdtr: ?*std.c.sf_hdtr = null;
- if (headers.len != 0 or trailers.len != 0) {
- // Here we carefully avoid `@intCast` by returning partial writes when
- // too many io vectors are provided.
- const hdr_cnt = cast(u31, headers.len) orelse maxInt(u31);
- if (headers.len > hdr_cnt) return writev(out_fd, headers);
-
- const trl_cnt = cast(u31, trailers.len) orelse maxInt(u31);
-
- hdtr_data = std.c.sf_hdtr{
- .headers = headers.ptr,
- .hdr_cnt = hdr_cnt,
- .trailers = trailers.ptr,
- .trl_cnt = trl_cnt,
- };
- hdtr = &hdtr_data;
- }
-
- while (true) {
- var sbytes: off_t = undefined;
- const err = errno(system.sendfile(in_fd, out_fd, @bitCast(in_offset), @min(in_len, max_count), hdtr, &sbytes, flags));
- const amt: usize = @bitCast(sbytes);
- switch (err) {
- .SUCCESS => return amt,
-
- .BADF => unreachable, // Always a race condition.
- .FAULT => unreachable, // Segmentation fault.
- .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
-
- .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => {
- // EINVAL could be any of the following situations:
- // * The fd argument is not a regular file.
- // * The s argument is not a SOCK.STREAM type socket.
- // * The offset argument is negative.
- // Because of some of these possibilities, we fall back to doing read/write
- // manually, the same as ENOSYS.
- break :sf;
- },
-
- .INTR => if (amt != 0) return amt else continue,
-
- .AGAIN => if (amt != 0) {
- return amt;
- } else {
- return error.WouldBlock;
- },
-
- .BUSY => if (amt != 0) {
- return amt;
- } else {
- return error.WouldBlock;
- },
-
- .IO => return error.InputOutput,
- .NOBUFS => return error.SystemResources,
- .PIPE => return error.BrokenPipe,
-
- else => {
- unexpectedErrno(err) catch {};
- if (amt != 0) {
- return amt;
- } else {
- break :sf;
- }
- },
- }
- }
- },
- .macos, .ios, .tvos, .watchos, .visionos => sf: {
- var hdtr_data: std.c.sf_hdtr = undefined;
- var hdtr: ?*std.c.sf_hdtr = null;
- if (headers.len != 0 or trailers.len != 0) {
- // Here we carefully avoid `@intCast` by returning partial writes when
- // too many io vectors are provided.
- const hdr_cnt = cast(u31, headers.len) orelse maxInt(u31);
- if (headers.len > hdr_cnt) return writev(out_fd, headers);
-
- const trl_cnt = cast(u31, trailers.len) orelse maxInt(u31);
-
- hdtr_data = std.c.sf_hdtr{
- .headers = headers.ptr,
- .hdr_cnt = hdr_cnt,
- .trailers = trailers.ptr,
- .trl_cnt = trl_cnt,
- };
- hdtr = &hdtr_data;
- }
-
- while (true) {
- var sbytes: off_t = @min(in_len, max_count);
- const err = errno(system.sendfile(in_fd, out_fd, @bitCast(in_offset), &sbytes, hdtr, flags));
- const amt: usize = @bitCast(sbytes);
- switch (err) {
- .SUCCESS => return amt,
-
- .BADF => unreachable, // Always a race condition.
- .FAULT => unreachable, // Segmentation fault.
- .INVAL => unreachable,
- .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
-
- .OPNOTSUPP, .NOTSOCK, .NOSYS => break :sf,
-
- .INTR => if (amt != 0) return amt else continue,
-
- .AGAIN => if (amt != 0) {
- return amt;
- } else {
- return error.WouldBlock;
- },
-
- .IO => return error.InputOutput,
- .PIPE => return error.BrokenPipe,
-
- else => {
- unexpectedErrno(err) catch {};
- if (amt != 0) {
- return amt;
- } else {
- break :sf;
- }
- },
- }
- }
- },
- else => {}, // fall back to read/write
- }
-
- if (headers.len != 0 and !header_done) {
- const amt = try writev(out_fd, headers);
- total_written += amt;
- if (amt < count_iovec_bytes(headers)) return total_written;
- }
-
- rw: {
- var buf: [8 * 4096]u8 = undefined;
- // Here we match BSD behavior, making a zero count value send as many bytes as possible.
- const adjusted_count = if (in_len == 0) buf.len else @min(buf.len, in_len);
- const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset);
- if (amt_read == 0) {
- if (in_len == 0) {
- // We have detected EOF from `in_fd`.
- break :rw;
- } else {
- return total_written;
- }
- }
- const amt_written = try write(out_fd, buf[0..amt_read]);
- total_written += amt_written;
- if (amt_written < in_len or in_len == 0) return total_written;
- }
-
- if (trailers.len != 0) {
- total_written += try writev(out_fd, trailers);
- }
-
- return total_written;
-}
-
-fn count_iovec_bytes(iovs: []const iovec_const) usize {
- var count: usize = 0;
- for (iovs) |iov| {
- count += iov.len;
- }
- return count;
-}
-
pub const CopyFileRangeError = error{
FileTooBig,
InputOutput,
diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig
index c2effb523a..21cc545f12 100644
--- a/lib/std/process/Child.zig
+++ b/lib/std/process/Child.zig
@@ -14,6 +14,7 @@ const assert = std.debug.assert;
const native_os = builtin.os.tag;
const Allocator = std.mem.Allocator;
const ChildProcess = @This();
+const ArrayList = std.ArrayListUnmanaged;
pub const Id = switch (native_os) {
.windows => windows.HANDLE,
@@ -348,19 +349,6 @@ pub const RunResult = struct {
stderr: []u8,
};
-fn writeFifoDataToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), fifo: *std.io.PollFifo) !void {
- if (fifo.head != 0) fifo.realign();
- if (list.capacity == 0) {
- list.* = .{
- .items = fifo.buf[0..fifo.count],
- .capacity = fifo.buf.len,
- };
- fifo.* = std.io.PollFifo.init(fifo.allocator);
- } else {
- try list.appendSlice(allocator, fifo.buf[0..fifo.count]);
- }
-}
-
/// Collect the output from the process's stdout and stderr. Will return once all output
/// has been collected. This does not mean that the process has ended. `wait` should still
/// be called to wait for and clean up the process.
@@ -370,28 +358,48 @@ pub fn collectOutput(
child: ChildProcess,
/// Used for `stdout` and `stderr`.
allocator: Allocator,
- stdout: *std.ArrayListUnmanaged(u8),
- stderr: *std.ArrayListUnmanaged(u8),
+ stdout: *ArrayList(u8),
+ stderr: *ArrayList(u8),
max_output_bytes: usize,
) !void {
assert(child.stdout_behavior == .Pipe);
assert(child.stderr_behavior == .Pipe);
- var poller = std.io.poll(allocator, enum { stdout, stderr }, .{
+ var poller = std.Io.poll(allocator, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
defer poller.deinit();
+ const stdout_r = poller.reader(.stdout);
+ stdout_r.buffer = stdout.allocatedSlice();
+ stdout_r.seek = 0;
+ stdout_r.end = stdout.items.len;
+
+ const stderr_r = poller.reader(.stderr);
+ stderr_r.buffer = stderr.allocatedSlice();
+ stderr_r.seek = 0;
+ stderr_r.end = stderr.items.len;
+
+ defer {
+ stdout.* = .{
+ .items = stdout_r.buffer[0..stdout_r.end],
+ .capacity = stdout_r.buffer.len,
+ };
+ stderr.* = .{
+ .items = stderr_r.buffer[0..stderr_r.end],
+ .capacity = stderr_r.buffer.len,
+ };
+ stdout_r.buffer = &.{};
+ stderr_r.buffer = &.{};
+ }
+
while (try poller.poll()) {
- if (poller.fifo(.stdout).count > max_output_bytes)
+ if (stdout_r.bufferedLen() > max_output_bytes)
return error.StdoutStreamTooLong;
- if (poller.fifo(.stderr).count > max_output_bytes)
+ if (stderr_r.bufferedLen() > max_output_bytes)
return error.StderrStreamTooLong;
}
-
- try writeFifoDataToArrayList(allocator, stdout, poller.fifo(.stdout));
- try writeFifoDataToArrayList(allocator, stderr, poller.fifo(.stderr));
}
pub const RunError = posix.GetCwdError || posix.ReadError || SpawnError || posix.PollError || error{
@@ -421,10 +429,10 @@ pub fn run(args: struct {
child.expand_arg0 = args.expand_arg0;
child.progress_node = args.progress_node;
- var stdout: std.ArrayListUnmanaged(u8) = .empty;
- errdefer stdout.deinit(args.allocator);
- var stderr: std.ArrayListUnmanaged(u8) = .empty;
- errdefer stderr.deinit(args.allocator);
+ var stdout: ArrayList(u8) = .empty;
+ defer stdout.deinit(args.allocator);
+ var stderr: ArrayList(u8) = .empty;
+ defer stderr.deinit(args.allocator);
try child.spawn();
errdefer {
@@ -432,7 +440,7 @@ pub fn run(args: struct {
}
try child.collectOutput(args.allocator, &stdout, &stderr, args.max_output_bytes);
- return RunResult{
+ return .{
.stdout = try stdout.toOwnedSlice(args.allocator),
.stderr = try stderr.toOwnedSlice(args.allocator),
.term = try child.wait(),
@@ -878,12 +886,12 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
var cmd_line_cache = WindowsCommandLineCache.init(self.allocator, self.argv);
defer cmd_line_cache.deinit();
- var app_buf: std.ArrayListUnmanaged(u16) = .empty;
+ var app_buf: ArrayList(u16) = .empty;
defer app_buf.deinit(self.allocator);
try app_buf.appendSlice(self.allocator, app_name_w);
- var dir_buf: std.ArrayListUnmanaged(u16) = .empty;
+ var dir_buf: ArrayList(u16) = .empty;
defer dir_buf.deinit(self.allocator);
if (cwd_path_w.len > 0) {
@@ -1003,13 +1011,16 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
}
fn writeIntFd(fd: i32, value: ErrInt) !void {
- const file: File = .{ .handle = fd };
- file.deprecatedWriter().writeInt(u64, @intCast(value), .little) catch return error.SystemResources;
+ var buffer: [8]u8 = undefined;
+ var fw: std.fs.File.Writer = .initMode(.{ .handle = fd }, &buffer, .streaming);
+ fw.interface.writeInt(u64, value, .little) catch unreachable;
+ fw.interface.flush() catch return error.SystemResources;
}
fn readIntFd(fd: i32) !ErrInt {
- const file: File = .{ .handle = fd };
- return @intCast(file.deprecatedReader().readInt(u64, .little) catch return error.SystemResources);
+ var buffer: [8]u8 = undefined;
+ var fr: std.fs.File.Reader = .initMode(.{ .handle = fd }, &buffer, .streaming);
+ return @intCast(fr.interface.takeInt(u64, .little) catch return error.SystemResources);
}
const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
@@ -1020,8 +1031,8 @@ const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
/// Note: If the dir is the cwd, dir_buf should be empty (len = 0).
fn windowsCreateProcessPathExt(
allocator: mem.Allocator,
- dir_buf: *std.ArrayListUnmanaged(u16),
- app_buf: *std.ArrayListUnmanaged(u16),
+ dir_buf: *ArrayList(u16),
+ app_buf: *ArrayList(u16),
pathext: [:0]const u16,
cmd_line_cache: *WindowsCommandLineCache,
envp_ptr: ?[*]u16,
@@ -1504,7 +1515,7 @@ const WindowsCommandLineCache = struct {
/// Returns the absolute path of `cmd.exe` within the Windows system directory.
/// The caller owns the returned slice.
fn windowsCmdExePath(allocator: mem.Allocator) error{ OutOfMemory, Unexpected }![:0]u16 {
- var buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 128);
+ var buf = try ArrayList(u16).initCapacity(allocator, 128);
errdefer buf.deinit(allocator);
while (true) {
const unused_slice = buf.unusedCapacitySlice();
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 22ccda1e40..43355d34f4 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -101,17 +101,11 @@ comptime {
// Simplified start code for stage2 until it supports more language features ///
fn main2() callconv(.c) c_int {
- root.main();
- return 0;
+ return callMain();
}
fn _start2() callconv(.withStackAlign(.c, 1)) noreturn {
- callMain2();
-}
-
-fn callMain2() noreturn {
- root.main();
- exit2(0);
+ std.posix.exit(callMain());
}
fn spirvMain2() callconv(.kernel) void {
@@ -119,51 +113,7 @@ fn spirvMain2() callconv(.kernel) void {
}
fn wWinMainCRTStartup2() callconv(.c) noreturn {
- root.main();
- exit2(0);
-}
-
-fn exit2(code: usize) noreturn {
- switch (native_os) {
- .linux => switch (builtin.cpu.arch) {
- .x86_64 => {
- asm volatile ("syscall"
- :
- : [number] "{rax}" (231),
- [arg1] "{rdi}" (code),
- : .{ .rcx = true, .r11 = true, .memory = true });
- },
- .arm => {
- asm volatile ("svc #0"
- :
- : [number] "{r7}" (1),
- [arg1] "{r0}" (code),
- : .{ .memory = true });
- },
- .aarch64 => {
- asm volatile ("svc #0"
- :
- : [number] "{x8}" (93),
- [arg1] "{x0}" (code),
- : .{ .memory = true });
- },
- .sparc64 => {
- asm volatile ("ta 0x6d"
- :
- : [number] "{g1}" (1),
- [arg1] "{o0}" (code),
- : .{ .o0 = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o6 = true, .o7 = true, .memory = true });
- },
- else => @compileError("TODO"),
- },
- // exits(0)
- .plan9 => std.os.plan9.exits(null),
- .windows => {
- std.os.windows.ntdll.RtlExitUserProcess(@truncate(code));
- },
- else => @compileError("TODO"),
- }
- unreachable;
+ std.posix.exit(callMain());
}
////////////////////////////////////////////////////////////////////////////////
@@ -676,10 +626,11 @@ pub inline fn callMain() u8 {
const result = root.main() catch |err| {
switch (builtin.zig_backend) {
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> {
- std.debug.print("error: failed with error\n", .{});
+ _ = std.posix.write(std.posix.STDERR_FILENO, "error: failed with error\n") catch {};
return 1;
},
else => {},
diff --git a/lib/std/tar.zig b/lib/std/tar.zig
index 729a07db0a..e397677cf3 100644
--- a/lib/std/tar.zig
+++ b/lib/std/tar.zig
@@ -19,7 +19,7 @@ const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;
-pub const writer = @import("tar/writer.zig").writer;
+pub const Writer = @import("tar/Writer.zig");
/// Provide this to receive detailed error messages.
/// When this is provided, some errors which would otherwise be returned
@@ -293,28 +293,6 @@ fn nullStr(str: []const u8) []const u8 {
return str;
}
-/// Options for iterator.
-/// Buffers should be provided by the caller.
-pub const IteratorOptions = struct {
- /// Use a buffer with length `std.fs.max_path_bytes` to match file system capabilities.
- file_name_buffer: []u8,
- /// Use a buffer with length `std.fs.max_path_bytes` to match file system capabilities.
- link_name_buffer: []u8,
- /// Collects error messages during unpacking
- diagnostics: ?*Diagnostics = null,
-};
-
-/// Iterates over files in tar archive.
-/// `next` returns each file in tar archive.
-pub fn iterator(reader: anytype, options: IteratorOptions) Iterator(@TypeOf(reader)) {
- return .{
- .reader = reader,
- .diagnostics = options.diagnostics,
- .file_name_buffer = options.file_name_buffer,
- .link_name_buffer = options.link_name_buffer,
- };
-}
-
/// Type of the file returned by iterator `next` method.
pub const FileKind = enum {
directory,
@@ -323,206 +301,192 @@ pub const FileKind = enum {
};
/// Iterator over entries in the tar file represented by reader.
-pub fn Iterator(comptime ReaderType: type) type {
- return struct {
- reader: ReaderType,
- diagnostics: ?*Diagnostics = null,
-
- // buffers for heeader and file attributes
- header_buffer: [Header.SIZE]u8 = undefined,
- file_name_buffer: []u8,
- link_name_buffer: []u8,
-
- // bytes of padding to the end of the block
- padding: usize = 0,
- // not consumed bytes of file from last next iteration
- unread_file_bytes: u64 = 0,
-
- pub const File = struct {
- name: []const u8, // name of file, symlink or directory
- link_name: []const u8, // target name of symlink
- size: u64 = 0, // size of the file in bytes
- mode: u32 = 0,
- kind: FileKind = .file,
-
- unread_bytes: *u64,
- parent_reader: ReaderType,
-
- pub const Reader = std.io.GenericReader(File, ReaderType.Error, File.read);
+pub const Iterator = struct {
+ reader: *std.Io.Reader,
+ diagnostics: ?*Diagnostics = null,
- pub fn reader(self: File) Reader {
- return .{ .context = self };
- }
+ // buffers for header and file attributes
+ header_buffer: [Header.SIZE]u8 = undefined,
+ file_name_buffer: []u8,
+ link_name_buffer: []u8,
- pub fn read(self: File, dest: []u8) ReaderType.Error!usize {
- const buf = dest[0..@min(dest.len, self.unread_bytes.*)];
- const n = try self.parent_reader.read(buf);
- self.unread_bytes.* -= n;
- return n;
- }
+ // bytes of padding to the end of the block
+ padding: usize = 0,
+ // not consumed bytes of file from last next iteration
+ unread_file_bytes: u64 = 0,
- // Writes file content to writer.
- pub fn writeAll(self: File, out_writer: anytype) !void {
- var buffer: [4096]u8 = undefined;
+ /// Options for iterator.
+ /// Buffers should be provided by the caller.
+ pub const Options = struct {
+ /// Use a buffer with length `std.fs.max_path_bytes` to match file system capabilities.
+ file_name_buffer: []u8,
+ /// Use a buffer with length `std.fs.max_path_bytes` to match file system capabilities.
+ link_name_buffer: []u8,
+ /// Collects error messages during unpacking
+ diagnostics: ?*Diagnostics = null,
+ };
- while (self.unread_bytes.* > 0) {
- const buf = buffer[0..@min(buffer.len, self.unread_bytes.*)];
- try self.parent_reader.readNoEof(buf);
- try out_writer.writeAll(buf);
- self.unread_bytes.* -= buf.len;
- }
- }
+ /// Iterates over files in tar archive.
+ /// `next` returns each file in tar archive.
+ pub fn init(reader: *std.Io.Reader, options: Options) Iterator {
+ return .{
+ .reader = reader,
+ .diagnostics = options.diagnostics,
+ .file_name_buffer = options.file_name_buffer,
+ .link_name_buffer = options.link_name_buffer,
};
+ }
- const Self = @This();
-
- fn readHeader(self: *Self) !?Header {
- if (self.padding > 0) {
- try self.reader.skipBytes(self.padding, .{});
- }
- const n = try self.reader.readAll(&self.header_buffer);
- if (n == 0) return null;
- if (n < Header.SIZE) return error.UnexpectedEndOfStream;
- const header = Header{ .bytes = self.header_buffer[0..Header.SIZE] };
- if (try header.checkChksum() == 0) return null;
- return header;
- }
+ pub const File = struct {
+ name: []const u8, // name of file, symlink or directory
+ link_name: []const u8, // target name of symlink
+ size: u64 = 0, // size of the file in bytes
+ mode: u32 = 0,
+ kind: FileKind = .file,
+ };
- fn readString(self: *Self, size: usize, buffer: []u8) ![]const u8 {
- if (size > buffer.len) return error.TarInsufficientBuffer;
- const buf = buffer[0..size];
- try self.reader.readNoEof(buf);
- return nullStr(buf);
+ fn readHeader(self: *Iterator) !?Header {
+ if (self.padding > 0) {
+ try self.reader.discardAll(self.padding);
}
+ const n = try self.reader.readSliceShort(&self.header_buffer);
+ if (n == 0) return null;
+ if (n < Header.SIZE) return error.UnexpectedEndOfStream;
+ const header = Header{ .bytes = self.header_buffer[0..Header.SIZE] };
+ if (try header.checkChksum() == 0) return null;
+ return header;
+ }
- fn newFile(self: *Self) File {
- return .{
- .name = self.file_name_buffer[0..0],
- .link_name = self.link_name_buffer[0..0],
- .parent_reader = self.reader,
- .unread_bytes = &self.unread_file_bytes,
- };
- }
+ fn readString(self: *Iterator, size: usize, buffer: []u8) ![]const u8 {
+ if (size > buffer.len) return error.TarInsufficientBuffer;
+ const buf = buffer[0..size];
+ try self.reader.readSliceAll(buf);
+ return nullStr(buf);
+ }
- // Number of padding bytes in the last file block.
- fn blockPadding(size: u64) usize {
- const block_rounded = std.mem.alignForward(u64, size, Header.SIZE); // size rounded to te block boundary
- return @intCast(block_rounded - size);
- }
+ fn newFile(self: *Iterator) File {
+ return .{
+ .name = self.file_name_buffer[0..0],
+ .link_name = self.link_name_buffer[0..0],
+ };
+ }
- /// Iterates through the tar archive as if it is a series of files.
- /// Internally, the tar format often uses entries (header with optional
- /// content) to add meta data that describes the next file. These
- /// entries should not normally be visible to the outside. As such, this
- /// loop iterates through one or more entries until it collects a all
- /// file attributes.
- pub fn next(self: *Self) !?File {
- if (self.unread_file_bytes > 0) {
- // If file content was not consumed by caller
- try self.reader.skipBytes(self.unread_file_bytes, .{});
- self.unread_file_bytes = 0;
- }
- var file: File = self.newFile();
-
- while (try self.readHeader()) |header| {
- const kind = header.kind();
- const size: u64 = try header.size();
- self.padding = blockPadding(size);
-
- switch (kind) {
- // File types to return upstream
- .directory, .normal, .symbolic_link => {
- file.kind = switch (kind) {
- .directory => .directory,
- .normal => .file,
- .symbolic_link => .sym_link,
- else => unreachable,
- };
- file.mode = try header.mode();
-
- // set file attributes if not already set by prefix/extended headers
- if (file.size == 0) {
- file.size = size;
- }
- if (file.link_name.len == 0) {
- file.link_name = try header.linkName(self.link_name_buffer);
- }
- if (file.name.len == 0) {
- file.name = try header.fullName(self.file_name_buffer);
- }
+ // Number of padding bytes in the last file block.
+ fn blockPadding(size: u64) usize {
+ const block_rounded = std.mem.alignForward(u64, size, Header.SIZE); // size rounded to the block boundary
+ return @intCast(block_rounded - size);
+ }
- self.padding = blockPadding(file.size);
- self.unread_file_bytes = file.size;
- return file;
- },
- // Prefix header types
- .gnu_long_name => {
- file.name = try self.readString(@intCast(size), self.file_name_buffer);
- },
- .gnu_long_link => {
- file.link_name = try self.readString(@intCast(size), self.link_name_buffer);
- },
- .extended_header => {
- // Use just attributes from last extended header.
- file = self.newFile();
-
- var rdr = paxIterator(self.reader, @intCast(size));
- while (try rdr.next()) |attr| {
- switch (attr.kind) {
- .path => {
- file.name = try attr.value(self.file_name_buffer);
- },
- .linkpath => {
- file.link_name = try attr.value(self.link_name_buffer);
- },
- .size => {
- var buf: [pax_max_size_attr_len]u8 = undefined;
- file.size = try std.fmt.parseInt(u64, try attr.value(&buf), 10);
- },
- }
- }
- },
- // Ignored header type
- .global_extended_header => {
- self.reader.skipBytes(size, .{}) catch return error.TarHeadersTooBig;
- },
- // All other are unsupported header types
- else => {
- const d = self.diagnostics orelse return error.TarUnsupportedHeader;
- try d.errors.append(d.allocator, .{ .unsupported_file_type = .{
- .file_name = try d.allocator.dupe(u8, header.name()),
- .file_type = kind,
- } });
- if (kind == .gnu_sparse) {
- try self.skipGnuSparseExtendedHeaders(header);
+ /// Iterates through the tar archive as if it is a series of files.
+ /// Internally, the tar format often uses entries (header with optional
+ /// content) to add meta data that describes the next file. These
+ /// entries should not normally be visible to the outside. As such, this
+ /// loop iterates through one or more entries until it collects all
+ /// file attributes.
+ pub fn next(self: *Iterator) !?File {
+ if (self.unread_file_bytes > 0) {
+ // If file content was not consumed by caller
+ try self.reader.discardAll64(self.unread_file_bytes);
+ self.unread_file_bytes = 0;
+ }
+ var file: File = self.newFile();
+
+ while (try self.readHeader()) |header| {
+ const kind = header.kind();
+ const size: u64 = try header.size();
+ self.padding = blockPadding(size);
+
+ switch (kind) {
+ // File types to return upstream
+ .directory, .normal, .symbolic_link => {
+ file.kind = switch (kind) {
+ .directory => .directory,
+ .normal => .file,
+ .symbolic_link => .sym_link,
+ else => unreachable,
+ };
+ file.mode = try header.mode();
+
+ // set file attributes if not already set by prefix/extended headers
+ if (file.size == 0) {
+ file.size = size;
+ }
+ if (file.link_name.len == 0) {
+ file.link_name = try header.linkName(self.link_name_buffer);
+ }
+ if (file.name.len == 0) {
+ file.name = try header.fullName(self.file_name_buffer);
+ }
+
+ self.padding = blockPadding(file.size);
+ self.unread_file_bytes = file.size;
+ return file;
+ },
+ // Prefix header types
+ .gnu_long_name => {
+ file.name = try self.readString(@intCast(size), self.file_name_buffer);
+ },
+ .gnu_long_link => {
+ file.link_name = try self.readString(@intCast(size), self.link_name_buffer);
+ },
+ .extended_header => {
+ // Use just attributes from last extended header.
+ file = self.newFile();
+
+ var rdr: PaxIterator = .{
+ .reader = self.reader,
+ .size = @intCast(size),
+ };
+ while (try rdr.next()) |attr| {
+ switch (attr.kind) {
+ .path => {
+ file.name = try attr.value(self.file_name_buffer);
+ },
+ .linkpath => {
+ file.link_name = try attr.value(self.link_name_buffer);
+ },
+ .size => {
+ var buf: [pax_max_size_attr_len]u8 = undefined;
+ file.size = try std.fmt.parseInt(u64, try attr.value(&buf), 10);
+ },
}
- self.reader.skipBytes(size, .{}) catch return error.TarHeadersTooBig;
- },
- }
+ }
+ },
+ // Ignored header type
+ .global_extended_header => {
+ self.reader.discardAll64(size) catch return error.TarHeadersTooBig;
+ },
+ // All other are unsupported header types
+ else => {
+ const d = self.diagnostics orelse return error.TarUnsupportedHeader;
+ try d.errors.append(d.allocator, .{ .unsupported_file_type = .{
+ .file_name = try d.allocator.dupe(u8, header.name()),
+ .file_type = kind,
+ } });
+ if (kind == .gnu_sparse) {
+ try self.skipGnuSparseExtendedHeaders(header);
+ }
+ self.reader.discardAll64(size) catch return error.TarHeadersTooBig;
+ },
}
- return null;
}
+ return null;
+ }
- fn skipGnuSparseExtendedHeaders(self: *Self, header: Header) !void {
- var is_extended = header.bytes[482] > 0;
- while (is_extended) {
- var buf: [Header.SIZE]u8 = undefined;
- const n = try self.reader.readAll(&buf);
- if (n < Header.SIZE) return error.UnexpectedEndOfStream;
- is_extended = buf[504] > 0;
- }
- }
- };
-}
+ pub fn streamRemaining(it: *Iterator, file: File, w: *std.Io.Writer) std.Io.Reader.StreamError!void {
+ try it.reader.streamExact64(w, file.size);
+ it.unread_file_bytes = 0;
+ }
-/// Pax attributes iterator.
-/// Size is length of pax extended header in reader.
-fn paxIterator(reader: anytype, size: usize) PaxIterator(@TypeOf(reader)) {
- return PaxIterator(@TypeOf(reader)){
- .reader = reader,
- .size = size,
- };
-}
+ fn skipGnuSparseExtendedHeaders(self: *Iterator, header: Header) !void {
+ var is_extended = header.bytes[482] > 0;
+ while (is_extended) {
+ var buf: [Header.SIZE]u8 = undefined;
+ try self.reader.readSliceAll(&buf);
+ is_extended = buf[504] > 0;
+ }
+ }
+};
const PaxAttributeKind = enum {
path,
@@ -533,108 +497,99 @@ const PaxAttributeKind = enum {
// maxInt(u64) has 20 chars, base 10 in practice we got 24 chars
const pax_max_size_attr_len = 64;
-fn PaxIterator(comptime ReaderType: type) type {
- return struct {
- size: usize, // cumulative size of all pax attributes
- reader: ReaderType,
- // scratch buffer used for reading attribute length and keyword
- scratch: [128]u8 = undefined,
-
- const Self = @This();
-
- const Attribute = struct {
- kind: PaxAttributeKind,
- len: usize, // length of the attribute value
- reader: ReaderType, // reader positioned at value start
-
- // Copies pax attribute value into destination buffer.
- // Must be called with destination buffer of size at least Attribute.len.
- pub fn value(self: Attribute, dst: []u8) ![]const u8 {
- if (self.len > dst.len) return error.TarInsufficientBuffer;
- // assert(self.len <= dst.len);
- const buf = dst[0..self.len];
- const n = try self.reader.readAll(buf);
- if (n < self.len) return error.UnexpectedEndOfStream;
- try validateAttributeEnding(self.reader);
- if (hasNull(buf)) return error.PaxNullInValue;
- return buf;
- }
- };
+pub const PaxIterator = struct {
+ size: usize, // cumulative size of all pax attributes
+ reader: *std.Io.Reader,
- // Iterates over pax attributes. Returns known only known attributes.
- // Caller has to call value in Attribute, to advance reader across value.
- pub fn next(self: *Self) !?Attribute {
- // Pax extended header consists of one or more attributes, each constructed as follows:
- // "%d %s=%s\n", <length>, <keyword>, <value>
- while (self.size > 0) {
- const length_buf = try self.readUntil(' ');
- const length = try std.fmt.parseInt(usize, length_buf, 10); // record length in bytes
-
- const keyword = try self.readUntil('=');
- if (hasNull(keyword)) return error.PaxNullInKeyword;
-
- // calculate value_len
- const value_start = length_buf.len + keyword.len + 2; // 2 separators
- if (length < value_start + 1 or self.size < length) return error.UnexpectedEndOfStream;
- const value_len = length - value_start - 1; // \n separator at end
- self.size -= length;
-
- const kind: PaxAttributeKind = if (eql(keyword, "path"))
- .path
- else if (eql(keyword, "linkpath"))
- .linkpath
- else if (eql(keyword, "size"))
- .size
- else {
- try self.reader.skipBytes(value_len, .{});
- try validateAttributeEnding(self.reader);
- continue;
- };
- if (kind == .size and value_len > pax_max_size_attr_len) {
- return error.PaxSizeAttrOverflow;
- }
- return Attribute{
- .kind = kind,
- .len = value_len,
- .reader = self.reader,
- };
- }
+ const Self = @This();
- return null;
+ const Attribute = struct {
+ kind: PaxAttributeKind,
+ len: usize, // length of the attribute value
+ reader: *std.Io.Reader, // reader positioned at value start
+
+ // Copies pax attribute value into destination buffer.
+ // Must be called with destination buffer of size at least Attribute.len.
+ pub fn value(self: Attribute, dst: []u8) ![]const u8 {
+ if (self.len > dst.len) return error.TarInsufficientBuffer;
+ // assert(self.len <= dst.len);
+ const buf = dst[0..self.len];
+ const n = try self.reader.readSliceShort(buf);
+ if (n < self.len) return error.UnexpectedEndOfStream;
+ try validateAttributeEnding(self.reader);
+ if (hasNull(buf)) return error.PaxNullInValue;
+ return buf;
}
+ };
- fn readUntil(self: *Self, delimiter: u8) ![]const u8 {
- var fbs = std.io.fixedBufferStream(&self.scratch);
- try self.reader.streamUntilDelimiter(fbs.writer(), delimiter, null);
- return fbs.getWritten();
+ // Iterates over pax attributes. Returns only known attributes.
+ // Caller has to call value in Attribute, to advance reader across value.
+ pub fn next(self: *Self) !?Attribute {
+ // Pax extended header consists of one or more attributes, each constructed as follows:
+ // "%d %s=%s\n", <length>, <keyword>, <value>
+ while (self.size > 0) {
+ const length_buf = try self.reader.takeSentinel(' ');
+ const length = try std.fmt.parseInt(usize, length_buf, 10); // record length in bytes
+
+ const keyword = try self.reader.takeSentinel('=');
+ if (hasNull(keyword)) return error.PaxNullInKeyword;
+
+ // calculate value_len
+ const value_start = length_buf.len + keyword.len + 2; // 2 separators
+ if (length < value_start + 1 or self.size < length) return error.UnexpectedEndOfStream;
+ const value_len = length - value_start - 1; // \n separator at end
+ self.size -= length;
+
+ const kind: PaxAttributeKind = if (eql(keyword, "path"))
+ .path
+ else if (eql(keyword, "linkpath"))
+ .linkpath
+ else if (eql(keyword, "size"))
+ .size
+ else {
+ try self.reader.discardAll(value_len);
+ try validateAttributeEnding(self.reader);
+ continue;
+ };
+ if (kind == .size and value_len > pax_max_size_attr_len) {
+ return error.PaxSizeAttrOverflow;
+ }
+ return .{
+ .kind = kind,
+ .len = value_len,
+ .reader = self.reader,
+ };
}
- fn eql(a: []const u8, b: []const u8) bool {
- return std.mem.eql(u8, a, b);
- }
+ return null;
+ }
- fn hasNull(str: []const u8) bool {
- return (std.mem.indexOfScalar(u8, str, 0)) != null;
- }
+ fn eql(a: []const u8, b: []const u8) bool {
+ return std.mem.eql(u8, a, b);
+ }
- // Checks that each record ends with new line.
- fn validateAttributeEnding(reader: ReaderType) !void {
- if (try reader.readByte() != '\n') return error.PaxInvalidAttributeEnd;
- }
- };
-}
+ fn hasNull(str: []const u8) bool {
+ return (std.mem.indexOfScalar(u8, str, 0)) != null;
+ }
+
+ // Checks that each record ends with new line.
+ fn validateAttributeEnding(reader: *std.Io.Reader) !void {
+ if (try reader.takeByte() != '\n') return error.PaxInvalidAttributeEnd;
+ }
+};
/// Saves tar file content to the file systems.
-pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: PipeOptions) !void {
+pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOptions) !void {
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
- var iter = iterator(reader, .{
+ var file_contents_buffer: [1024]u8 = undefined;
+ var it: Iterator = .init(reader, .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
.diagnostics = options.diagnostics,
});
- while (try iter.next()) |file| {
+ while (try it.next()) |file| {
const file_name = stripComponents(file.name, options.strip_components);
if (file_name.len == 0 and file.kind != .directory) {
const d = options.diagnostics orelse return error.TarComponentsOutsideStrippedPrefix;
@@ -656,7 +611,9 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: PipeOptions)
.file => {
if (createDirAndFile(dir, file_name, fileMode(file.mode, options))) |fs_file| {
defer fs_file.close();
- try file.writeAll(fs_file);
+ var file_writer = fs_file.writer(&file_contents_buffer);
+ try it.streamRemaining(file, &file_writer.interface);
+ try file_writer.interface.flush();
} else |err| {
const d = options.diagnostics orelse return err;
try d.errors.append(d.allocator, .{ .unable_to_create_file = .{
@@ -826,11 +783,14 @@ test PaxIterator {
var buffer: [1024]u8 = undefined;
outer: for (cases) |case| {
- var stream = std.io.fixedBufferStream(case.data);
- var iter = paxIterator(stream.reader(), case.data.len);
+ var reader: std.Io.Reader = .fixed(case.data);
+ var it: PaxIterator = .{
+ .size = case.data.len,
+ .reader = &reader,
+ };
var i: usize = 0;
- while (iter.next() catch |err| {
+ while (it.next() catch |err| {
if (case.err) |e| {
try testing.expectEqual(e, err);
continue;
@@ -853,12 +813,6 @@ test PaxIterator {
}
}
-test {
- _ = @import("tar/test.zig");
- _ = @import("tar/writer.zig");
- _ = Diagnostics;
-}
-
test "header parse size" {
const cases = [_]struct {
in: []const u8,
@@ -941,7 +895,7 @@ test "create file and symlink" {
file.close();
}
-test iterator {
+test Iterator {
// Example tar file is created from this tree structure:
// $ tree example
// example
@@ -962,19 +916,19 @@ test iterator {
// example/empty/
const data = @embedFile("tar/testdata/example.tar");
- var fbs = std.io.fixedBufferStream(data);
+ var reader: std.Io.Reader = .fixed(data);
// User provided buffers to the iterator
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
// Create iterator
- var iter = iterator(fbs.reader(), .{
+ var it: Iterator = .init(&reader, .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
});
// Iterate over files in example.tar
var file_no: usize = 0;
- while (try iter.next()) |file| : (file_no += 1) {
+ while (try it.next()) |file| : (file_no += 1) {
switch (file.kind) {
.directory => {
switch (file_no) {
@@ -987,10 +941,10 @@ test iterator {
},
.file => {
try testing.expectEqualStrings("example/a/file", file.name);
- // Read file content
var buf: [16]u8 = undefined;
- const n = try file.reader().readAll(&buf);
- try testing.expectEqualStrings("content\n", buf[0..n]);
+ var w: std.Io.Writer = .fixed(&buf);
+ try it.streamRemaining(file, &w);
+ try testing.expectEqualStrings("content\n", w.buffered());
},
.sym_link => {
try testing.expectEqualStrings("example/b/symlink", file.name);
@@ -1021,15 +975,14 @@ test pipeToFileSystem {
// example/empty/
const data = @embedFile("tar/testdata/example.tar");
- var fbs = std.io.fixedBufferStream(data);
- const reader = fbs.reader();
+ var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
defer tmp.cleanup();
const dir = tmp.dir;
- // Save tar from `reader` to the file system `dir`
- pipeToFileSystem(dir, reader, .{
+ // Save tar from reader to the file system `dir`
+ pipeToFileSystem(dir, &reader, .{
.mode_mode = .ignore,
.strip_components = 1,
.exclude_empty_directories = true,
@@ -1053,8 +1006,7 @@ test pipeToFileSystem {
test "pipeToFileSystem root_dir" {
const data = @embedFile("tar/testdata/example.tar");
- var fbs = std.io.fixedBufferStream(data);
- const reader = fbs.reader();
+ var reader: std.Io.Reader = .fixed(data);
// with strip_components = 1
{
@@ -1063,7 +1015,7 @@ test "pipeToFileSystem root_dir" {
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, reader, .{
+ pipeToFileSystem(tmp.dir, &reader, .{
.strip_components = 1,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1079,13 +1031,13 @@ test "pipeToFileSystem root_dir" {
// with strip_components = 0
{
- fbs.reset();
+ reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, reader, .{
+ pipeToFileSystem(tmp.dir, &reader, .{
.strip_components = 0,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1102,45 +1054,42 @@ test "pipeToFileSystem root_dir" {
test "findRoot with single file archive" {
const data = @embedFile("tar/testdata/22752.tar");
- var fbs = std.io.fixedBufferStream(data);
- const reader = fbs.reader();
+ var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try pipeToFileSystem(tmp.dir, reader, .{ .diagnostics = &diagnostics });
+ try pipeToFileSystem(tmp.dir, &reader, .{ .diagnostics = &diagnostics });
try testing.expectEqualStrings("", diagnostics.root_dir);
}
test "findRoot without explicit root dir" {
const data = @embedFile("tar/testdata/19820.tar");
- var fbs = std.io.fixedBufferStream(data);
- const reader = fbs.reader();
+ var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try pipeToFileSystem(tmp.dir, reader, .{ .diagnostics = &diagnostics });
+ try pipeToFileSystem(tmp.dir, &reader, .{ .diagnostics = &diagnostics });
try testing.expectEqualStrings("root", diagnostics.root_dir);
}
test "pipeToFileSystem strip_components" {
const data = @embedFile("tar/testdata/example.tar");
- var fbs = std.io.fixedBufferStream(data);
- const reader = fbs.reader();
+ var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, reader, .{
+ pipeToFileSystem(tmp.dir, &reader, .{
.strip_components = 3,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1194,13 +1143,12 @@ test "executable bit" {
const data = @embedFile("tar/testdata/example.tar");
for ([_]PipeOptions.ModeMode{ .ignore, .executable_bit_only }) |opt| {
- var fbs = std.io.fixedBufferStream(data);
- const reader = fbs.reader();
+ var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
//defer tmp.cleanup();
- pipeToFileSystem(tmp.dir, reader, .{
+ pipeToFileSystem(tmp.dir, &reader, .{
.strip_components = 1,
.exclude_empty_directories = true,
.mode_mode = opt,
@@ -1226,3 +1174,9 @@ test "executable bit" {
}
}
}
+
+test {
+ _ = @import("tar/test.zig");
+ _ = Writer;
+ _ = Diagnostics;
+}
diff --git a/lib/std/tar/Writer.zig b/lib/std/tar/Writer.zig
new file mode 100644
index 0000000000..61ae00b24e
--- /dev/null
+++ b/lib/std/tar/Writer.zig
@@ -0,0 +1,462 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const testing = std.testing;
+const Writer = @This();
+
+const block_size = @sizeOf(Header);
+
+/// Options for writing file/dir/link. If left empty 0o664 is used for
+/// file mode and current time for mtime.
+pub const Options = struct {
+ /// File system permission mode.
+ mode: u32 = 0,
+ /// File system modification time.
+ mtime: u64 = 0,
+};
+
+underlying_writer: *std.Io.Writer,
+prefix: []const u8 = "",
+mtime_now: u64 = 0,
+
+const Error = error{
+ WriteFailed,
+ OctalOverflow,
+ NameTooLong,
+};
+
+/// Sets prefix for all other write* method paths.
+pub fn setRoot(w: *Writer, root: []const u8) Error!void {
+ if (root.len > 0)
+ try w.writeDir(root, .{});
+
+ w.prefix = root;
+}
+
+pub fn writeDir(w: *Writer, sub_path: []const u8, options: Options) Error!void {
+ try w.writeHeader(.directory, sub_path, "", 0, options);
+}
+
+pub const WriteFileError = std.Io.Writer.FileError || Error || std.fs.File.GetEndPosError;
+
+pub fn writeFile(
+ w: *Writer,
+ sub_path: []const u8,
+ file_reader: *std.fs.File.Reader,
+ stat_mtime: i128,
+) WriteFileError!void {
+ const size = try file_reader.getSize();
+ const mtime: u64 = @intCast(@divFloor(stat_mtime, std.time.ns_per_s));
+
+ var header: Header = .{};
+ try w.setPath(&header, sub_path);
+ try header.setSize(size);
+ try header.setMtime(mtime);
+ try header.updateChecksum();
+
+ try w.underlying_writer.writeAll(@ptrCast((&header)[0..1]));
+ _ = try w.underlying_writer.sendFileAll(file_reader, .unlimited);
+ try w.writePadding64(size);
+}
+
+pub const WriteFileStreamError = Error || std.Io.Reader.StreamError;
+
+/// Writes file reading file content from `reader`. Reads exactly `size` bytes
+/// from `reader`, or returns `error.EndOfStream`.
+pub fn writeFileStream(
+ w: *Writer,
+ sub_path: []const u8,
+ size: u64,
+ reader: *std.Io.Reader,
+ options: Options,
+) WriteFileStreamError!void {
+ try w.writeHeader(.regular, sub_path, "", size, options);
+ try reader.streamExact64(w.underlying_writer, size);
+ try w.writePadding64(size);
+}
+
+/// Writes file using bytes buffer `content` for size and file content.
+pub fn writeFileBytes(w: *Writer, sub_path: []const u8, content: []const u8, options: Options) Error!void {
+ try w.writeHeader(.regular, sub_path, "", content.len, options);
+ try w.underlying_writer.writeAll(content);
+ try w.writePadding(content.len);
+}
+
+pub fn writeLink(w: *Writer, sub_path: []const u8, link_name: []const u8, options: Options) Error!void {
+ try w.writeHeader(.symbolic_link, sub_path, link_name, 0, options);
+}
+
+fn writeHeader(
+ w: *Writer,
+ typeflag: Header.FileType,
+ sub_path: []const u8,
+ link_name: []const u8,
+ size: u64,
+ options: Options,
+) Error!void {
+ var header = Header.init(typeflag);
+ try w.setPath(&header, sub_path);
+ try header.setSize(size);
+ try header.setMtime(options.mtime);
+ if (options.mode != 0)
+ try header.setMode(options.mode);
+ if (typeflag == .symbolic_link)
+ header.setLinkname(link_name) catch |err| switch (err) {
+ error.NameTooLong => try w.writeExtendedHeader(.gnu_long_link, &.{link_name}),
+ else => return err,
+ };
+ try header.write(w.underlying_writer);
+}
+
+/// Writes path in posix header, if don't fit (in name+prefix; 100+155
+/// bytes) writes it in gnu extended header.
+fn setPath(w: *Writer, header: *Header, sub_path: []const u8) Error!void {
+ header.setPath(w.prefix, sub_path) catch |err| switch (err) {
+ error.NameTooLong => {
+ // write extended header
+ const buffers: []const []const u8 = if (w.prefix.len == 0)
+ &.{sub_path}
+ else
+ &.{ w.prefix, "/", sub_path };
+ try w.writeExtendedHeader(.gnu_long_name, buffers);
+ },
+ else => return err,
+ };
+}
+
+/// Writes gnu extended header: gnu_long_name or gnu_long_link.
+fn writeExtendedHeader(w: *Writer, typeflag: Header.FileType, buffers: []const []const u8) Error!void {
+ var len: usize = 0;
+ for (buffers) |buf| len += buf.len;
+
+ var header: Header = .init(typeflag);
+ try header.setSize(len);
+ try header.write(w.underlying_writer);
+ for (buffers) |buf|
+ try w.underlying_writer.writeAll(buf);
+ try w.writePadding(len);
+}
+
+fn writePadding(w: *Writer, bytes: usize) std.Io.Writer.Error!void {
+ return writePaddingPos(w, bytes % block_size);
+}
+
+fn writePadding64(w: *Writer, bytes: u64) std.Io.Writer.Error!void {
+ return writePaddingPos(w, @intCast(bytes % block_size));
+}
+
+fn writePaddingPos(w: *Writer, pos: usize) std.Io.Writer.Error!void {
+ if (pos == 0) return;
+ try w.underlying_writer.splatByteAll(0, block_size - pos);
+}
+
+/// According to the specification, tar should finish with two zero blocks, but
+/// "reasonable system must not assume that such a block exists when reading an
+/// archive". Therefore, the Zig standard library recommends to not call this
+/// function.
+pub fn finishPedantically(w: *Writer) std.Io.Writer.Error!void {
+ try w.underlying_writer.splatByteAll(0, block_size * 2);
+}
+
+/// A struct that is exactly 512 bytes and matches tar file format. This is
+/// intended to be used for outputting tar files; for parsing there is
+/// `std.tar.Header`.
+pub const Header = extern struct {
+ // This struct was originally copied from
+ // https://github.com/mattnite/tar/blob/main/src/main.zig which is MIT
+ // licensed.
+ //
+ // The name, linkname, magic, uname, and gname are null-terminated character
+ // strings. All other fields are zero-filled octal numbers in ASCII. Each
+ // numeric field of width w contains w minus 1 digits, and a null.
+ // Reference: https://www.gnu.org/software/tar/manual/html_node/Standard.html
+ // POSIX header: byte offset
+ name: [100]u8 = [_]u8{0} ** 100, // 0
+ mode: [7:0]u8 = default_mode.file, // 100
+ uid: [7:0]u8 = [_:0]u8{0} ** 7, // unused 108
+ gid: [7:0]u8 = [_:0]u8{0} ** 7, // unused 116
+ size: [11:0]u8 = [_:0]u8{'0'} ** 11, // 124
+ mtime: [11:0]u8 = [_:0]u8{'0'} ** 11, // 136
+ checksum: [7:0]u8 = [_:0]u8{' '} ** 7, // 148
+ typeflag: FileType = .regular, // 156
+ linkname: [100]u8 = [_]u8{0} ** 100, // 157
+ magic: [6]u8 = [_]u8{ 'u', 's', 't', 'a', 'r', 0 }, // 257
+ version: [2]u8 = [_]u8{ '0', '0' }, // 263
+ uname: [32]u8 = [_]u8{0} ** 32, // unused 265
+ gname: [32]u8 = [_]u8{0} ** 32, // unused 297
+ devmajor: [7:0]u8 = [_:0]u8{0} ** 7, // unused 329
+ devminor: [7:0]u8 = [_:0]u8{0} ** 7, // unused 337
+ prefix: [155]u8 = [_]u8{0} ** 155, // 345
+ pad: [12]u8 = [_]u8{0} ** 12, // unused 500
+
+ pub const FileType = enum(u8) {
+ regular = '0',
+ symbolic_link = '2',
+ directory = '5',
+ gnu_long_name = 'L',
+ gnu_long_link = 'K',
+ };
+
+ const default_mode = struct {
+ const file = [_:0]u8{ '0', '0', '0', '0', '6', '6', '4' }; // 0o664
+ const dir = [_:0]u8{ '0', '0', '0', '0', '7', '7', '5' }; // 0o775
+ const sym_link = [_:0]u8{ '0', '0', '0', '0', '7', '7', '7' }; // 0o777
+ const other = [_:0]u8{ '0', '0', '0', '0', '0', '0', '0' }; // 0o000
+ };
+
+ pub fn init(typeflag: FileType) Header {
+ return .{
+ .typeflag = typeflag,
+ .mode = switch (typeflag) {
+ .directory => default_mode.dir,
+ .symbolic_link => default_mode.sym_link,
+ .regular => default_mode.file,
+ else => default_mode.other,
+ },
+ };
+ }
+
+ pub fn setSize(w: *Header, size: u64) error{OctalOverflow}!void {
+ try octal(&w.size, size);
+ }
+
+ fn octal(buf: []u8, value: u64) error{OctalOverflow}!void {
+ var remainder: u64 = value;
+ var pos: usize = buf.len;
+ while (remainder > 0 and pos > 0) {
+ pos -= 1;
+ const c: u8 = @as(u8, @intCast(remainder % 8)) + '0';
+ buf[pos] = c;
+ remainder /= 8;
+ if (pos == 0 and remainder > 0) return error.OctalOverflow;
+ }
+ }
+
+ pub fn setMode(w: *Header, mode: u32) error{OctalOverflow}!void {
+ try octal(&w.mode, mode);
+ }
+
+ // Integer number of seconds since January 1, 1970, 00:00 Coordinated Universal Time.
+ // mtime == 0 will use current time
+ pub fn setMtime(w: *Header, mtime: u64) error{OctalOverflow}!void {
+ try octal(&w.mtime, mtime);
+ }
+
+ pub fn updateChecksum(w: *Header) !void {
+ var checksum: usize = ' '; // other 7 w.checksum bytes are initialized to ' '
+ for (std.mem.asBytes(w)) |val|
+ checksum += val;
+ try octal(&w.checksum, checksum);
+ }
+
+ pub fn write(h: *Header, bw: *std.Io.Writer) error{ OctalOverflow, WriteFailed }!void {
+ try h.updateChecksum();
+ try bw.writeAll(std.mem.asBytes(h));
+ }
+
+ pub fn setLinkname(w: *Header, link: []const u8) !void {
+ if (link.len > w.linkname.len) return error.NameTooLong;
+ @memcpy(w.linkname[0..link.len], link);
+ }
+
+ pub fn setPath(w: *Header, prefix: []const u8, sub_path: []const u8) !void {
+ const max_prefix = w.prefix.len;
+ const max_name = w.name.len;
+ const sep = std.fs.path.sep_posix;
+
+ if (prefix.len + sub_path.len > max_name + max_prefix or prefix.len > max_prefix)
+ return error.NameTooLong;
+
+ // both fit into name
+ if (prefix.len > 0 and prefix.len + sub_path.len < max_name) {
+ @memcpy(w.name[0..prefix.len], prefix);
+ w.name[prefix.len] = sep;
+ @memcpy(w.name[prefix.len + 1 ..][0..sub_path.len], sub_path);
+ return;
+ }
+
+ // sub_path fits into name
+ // there is no prefix or prefix fits into prefix
+ if (sub_path.len <= max_name) {
+ @memcpy(w.name[0..sub_path.len], sub_path);
+ @memcpy(w.prefix[0..prefix.len], prefix);
+ return;
+ }
+
+ if (prefix.len > 0) {
+ @memcpy(w.prefix[0..prefix.len], prefix);
+ w.prefix[prefix.len] = sep;
+ }
+ const prefix_pos = if (prefix.len > 0) prefix.len + 1 else 0;
+
+ // add as much to prefix as you can, must split at /
+ const prefix_remaining = max_prefix - prefix_pos;
+ if (std.mem.lastIndexOf(u8, sub_path[0..@min(prefix_remaining, sub_path.len)], &.{'/'})) |sep_pos| {
+ @memcpy(w.prefix[prefix_pos..][0..sep_pos], sub_path[0..sep_pos]);
+ if ((sub_path.len - sep_pos - 1) > max_name) return error.NameTooLong;
+ @memcpy(w.name[0..][0 .. sub_path.len - sep_pos - 1], sub_path[sep_pos + 1 ..]);
+ return;
+ }
+
+ return error.NameTooLong;
+ }
+
+ comptime {
+ assert(@sizeOf(Header) == 512);
+ }
+
+ test "setPath" {
+ const cases = [_]struct {
+ in: []const []const u8,
+ out: []const []const u8,
+ }{
+ .{
+ .in = &.{ "", "123456789" },
+ .out = &.{ "", "123456789" },
+ },
+ // can fit into name
+ .{
+ .in = &.{ "prefix", "sub_path" },
+ .out = &.{ "", "prefix/sub_path" },
+ },
+ // no more both fits into name
+ .{
+ .in = &.{ "prefix", "0123456789/" ** 8 ++ "basename" },
+ .out = &.{ "prefix", "0123456789/" ** 8 ++ "basename" },
+ },
+ // put as much as you can into prefix the rest goes into name
+ .{
+ .in = &.{ "prefix", "0123456789/" ** 10 ++ "basename" },
+ .out = &.{ "prefix/" ++ "0123456789/" ** 9 ++ "0123456789", "basename" },
+ },
+
+ .{
+ .in = &.{ "prefix", "0123456789/" ** 15 ++ "basename" },
+ .out = &.{ "prefix/" ++ "0123456789/" ** 12 ++ "0123456789", "0123456789/0123456789/basename" },
+ },
+ .{
+ .in = &.{ "prefix", "0123456789/" ** 21 ++ "basename" },
+ .out = &.{ "prefix/" ++ "0123456789/" ** 12 ++ "0123456789", "0123456789/" ** 8 ++ "basename" },
+ },
+ .{
+ .in = &.{ "", "012345678/" ** 10 ++ "foo" },
+ .out = &.{ "012345678/" ** 9 ++ "012345678", "foo" },
+ },
+ };
+
+ for (cases) |case| {
+ var header = Header.init(.regular);
+ try header.setPath(case.in[0], case.in[1]);
+ try testing.expectEqualStrings(case.out[0], std.mem.sliceTo(&header.prefix, 0));
+ try testing.expectEqualStrings(case.out[1], std.mem.sliceTo(&header.name, 0));
+ }
+
+ const error_cases = [_]struct {
+ in: []const []const u8,
+ }{
+ // basename can't fit into name (106 characters)
+ .{ .in = &.{ "zig", "test/cases/compile_errors/regression_test_2980_base_type_u32_is_not_type_checked_properly_when_assigning_a_value_within_a_struct.zig" } },
+ // cant fit into 255 + sep
+ .{ .in = &.{ "prefix", "0123456789/" ** 22 ++ "basename" } },
+ // can fit but sub_path can't be split (there is no separator)
+ .{ .in = &.{ "prefix", "0123456789" ** 10 ++ "a" } },
+ .{ .in = &.{ "prefix", "0123456789" ** 14 ++ "basename" } },
+ };
+
+ for (error_cases) |case| {
+ var header = Header.init(.regular);
+ try testing.expectError(
+ error.NameTooLong,
+ header.setPath(case.in[0], case.in[1]),
+ );
+ }
+ }
+};
+
+test {
+ _ = Header;
+}
+
+test "write files" {
+ const files = [_]struct {
+ path: []const u8,
+ content: []const u8,
+ }{
+ .{ .path = "foo", .content = "bar" },
+ .{ .path = "a12345678/" ** 10 ++ "foo", .content = "a" ** 511 },
+ .{ .path = "b12345678/" ** 24 ++ "foo", .content = "b" ** 512 },
+ .{ .path = "c12345678/" ** 25 ++ "foo", .content = "c" ** 513 },
+ .{ .path = "d12345678/" ** 51 ++ "foo", .content = "d" ** 1025 },
+ .{ .path = "e123456789" ** 11, .content = "e" },
+ };
+
+ var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
+ var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
+
+ // with root
+ {
+ const root = "root";
+
+ var output: std.Io.Writer.Allocating = .init(testing.allocator);
+ var w: Writer = .{ .underlying_writer = &output.writer };
+ defer output.deinit();
+ try w.setRoot(root);
+ for (files) |file|
+ try w.writeFileBytes(file.path, file.content, .{});
+
+ var input: std.Io.Reader = .fixed(output.getWritten());
+ var it: std.tar.Iterator = .init(&input, .{
+ .file_name_buffer = &file_name_buffer,
+ .link_name_buffer = &link_name_buffer,
+ });
+
+ // first entry is directory with prefix
+ {
+ const actual = (try it.next()).?;
+ try testing.expectEqualStrings(root, actual.name);
+ try testing.expectEqual(std.tar.FileKind.directory, actual.kind);
+ }
+
+ var i: usize = 0;
+ while (try it.next()) |actual| {
+ defer i += 1;
+ const expected = files[i];
+ try testing.expectEqualStrings(root, actual.name[0..root.len]);
+ try testing.expectEqual('/', actual.name[root.len..][0]);
+ try testing.expectEqualStrings(expected.path, actual.name[root.len + 1 ..]);
+
+ var content: std.Io.Writer.Allocating = .init(testing.allocator);
+ defer content.deinit();
+ try it.streamRemaining(actual, &content.writer);
+ try testing.expectEqualSlices(u8, expected.content, content.getWritten());
+ }
+ }
+ // without root
+ {
+ var output: std.Io.Writer.Allocating = .init(testing.allocator);
+ var w: Writer = .{ .underlying_writer = &output.writer };
+ defer output.deinit();
+ for (files) |file| {
+ var content: std.Io.Reader = .fixed(file.content);
+ try w.writeFileStream(file.path, file.content.len, &content, .{});
+ }
+
+ var input: std.Io.Reader = .fixed(output.getWritten());
+ var it: std.tar.Iterator = .init(&input, .{
+ .file_name_buffer = &file_name_buffer,
+ .link_name_buffer = &link_name_buffer,
+ });
+
+ var i: usize = 0;
+ while (try it.next()) |actual| {
+ defer i += 1;
+ const expected = files[i];
+ try testing.expectEqualStrings(expected.path, actual.name);
+
+ var content: std.Io.Writer.Allocating = .init(testing.allocator);
+ defer content.deinit();
+ try it.streamRemaining(actual, &content.writer);
+ try testing.expectEqualSlices(u8, expected.content, content.getWritten());
+ }
+ try w.finishPedantically();
+ }
+}
diff --git a/lib/std/tar/test.zig b/lib/std/tar/test.zig
index 3bcb5af90c..3356baacb5 100644
--- a/lib/std/tar/test.zig
+++ b/lib/std/tar/test.zig
@@ -18,31 +18,72 @@ const Case = struct {
err: ?anyerror = null, // parsing should fail with this error
};
-const cases = [_]Case{
- .{
- .data = @embedFile("testdata/gnu.tar"),
- .files = &[_]Case.File{
- .{
- .name = "small.txt",
- .size = 5,
- .mode = 0o640,
- },
- .{
- .name = "small2.txt",
- .size = 11,
- .mode = 0o640,
- },
+const gnu_case: Case = .{
+ .data = @embedFile("testdata/gnu.tar"),
+ .files = &[_]Case.File{
+ .{
+ .name = "small.txt",
+ .size = 5,
+ .mode = 0o640,
},
- .chksums = &[_][]const u8{
- "e38b27eaccb4391bdec553a7f3ae6b2f",
- "c65bd2e50a56a2138bf1716f2fd56fe9",
+ .{
+ .name = "small2.txt",
+ .size = 11,
+ .mode = 0o640,
+ },
+ },
+ .chksums = &[_][]const u8{
+ "e38b27eaccb4391bdec553a7f3ae6b2f",
+ "c65bd2e50a56a2138bf1716f2fd56fe9",
+ },
+};
+
+const gnu_multi_headers_case: Case = .{
+ .data = @embedFile("testdata/gnu-multi-hdrs.tar"),
+ .files = &[_]Case.File{
+ .{
+ .name = "GNU2/GNU2/long-path-name",
+ .link_name = "GNU4/GNU4/long-linkpath-name",
+ .kind = .sym_link,
},
},
- .{
+};
+
+const trailing_slash_case: Case = .{
+ .data = @embedFile("testdata/trailing-slash.tar"),
+ .files = &[_]Case.File{
+ .{
+ .name = "123456789/" ** 30,
+ .kind = .directory,
+ },
+ },
+};
+
+const writer_big_long_case: Case = .{
+ // Size in gnu extended format, and name in pax attribute.
+ .data = @embedFile("testdata/writer-big-long.tar"),
+ .files = &[_]Case.File{
+ .{
+ .name = "longname/" ** 15 ++ "16gig.txt",
+ .size = 16 * 1024 * 1024 * 1024,
+ .mode = 0o644,
+ .truncated = true,
+ },
+ },
+};
+
+const fuzz1_case: Case = .{
+ .data = @embedFile("testdata/fuzz1.tar"),
+ .err = error.TarInsufficientBuffer,
+};
+
+test "run test cases" {
+ try testCase(gnu_case);
+ try testCase(.{
.data = @embedFile("testdata/sparse-formats.tar"),
.err = error.TarUnsupportedHeader,
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/star.tar"),
.files = &[_]Case.File{
.{
@@ -60,8 +101,8 @@ const cases = [_]Case{
"e38b27eaccb4391bdec553a7f3ae6b2f",
"c65bd2e50a56a2138bf1716f2fd56fe9",
},
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/v7.tar"),
.files = &[_]Case.File{
.{
@@ -79,8 +120,8 @@ const cases = [_]Case{
"e38b27eaccb4391bdec553a7f3ae6b2f",
"c65bd2e50a56a2138bf1716f2fd56fe9",
},
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/pax.tar"),
.files = &[_]Case.File{
.{
@@ -99,13 +140,13 @@ const cases = [_]Case{
.chksums = &[_][]const u8{
"3c382e8f5b6631aa2db52643912ffd4a",
},
- },
- .{
+ });
+ try testCase(.{
// pax attribute don't end with \n
.data = @embedFile("testdata/pax-bad-hdr-file.tar"),
.err = error.PaxInvalidAttributeEnd,
- },
- .{
+ });
+ try testCase(.{
// size is in pax attribute
.data = @embedFile("testdata/pax-pos-size-file.tar"),
.files = &[_]Case.File{
@@ -119,8 +160,8 @@ const cases = [_]Case{
.chksums = &[_][]const u8{
"0afb597b283fe61b5d4879669a350556",
},
- },
- .{
+ });
+ try testCase(.{
// has pax records which we are not interested in
.data = @embedFile("testdata/pax-records.tar"),
.files = &[_]Case.File{
@@ -128,8 +169,8 @@ const cases = [_]Case{
.name = "file",
},
},
- },
- .{
+ });
+ try testCase(.{
// has global records which we are ignoring
.data = @embedFile("testdata/pax-global-records.tar"),
.files = &[_]Case.File{
@@ -146,8 +187,8 @@ const cases = [_]Case{
.name = "file4",
},
},
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/nil-uid.tar"),
.files = &[_]Case.File{
.{
@@ -160,8 +201,8 @@ const cases = [_]Case{
.chksums = &[_][]const u8{
"08d504674115e77a67244beac19668f5",
},
- },
- .{
+ });
+ try testCase(.{
// has xattrs and pax records which we are ignoring
.data = @embedFile("testdata/xattrs.tar"),
.files = &[_]Case.File{
@@ -182,23 +223,14 @@ const cases = [_]Case{
"e38b27eaccb4391bdec553a7f3ae6b2f",
"c65bd2e50a56a2138bf1716f2fd56fe9",
},
- },
- .{
- .data = @embedFile("testdata/gnu-multi-hdrs.tar"),
- .files = &[_]Case.File{
- .{
- .name = "GNU2/GNU2/long-path-name",
- .link_name = "GNU4/GNU4/long-linkpath-name",
- .kind = .sym_link,
- },
- },
- },
- .{
+ });
+ try testCase(gnu_multi_headers_case);
+ try testCase(.{
// has gnu type D (directory) and S (sparse) blocks
.data = @embedFile("testdata/gnu-incremental.tar"),
.err = error.TarUnsupportedHeader,
- },
- .{
+ });
+ try testCase(.{
// should use values only from last pax header
.data = @embedFile("testdata/pax-multi-hdrs.tar"),
.files = &[_]Case.File{
@@ -208,8 +240,8 @@ const cases = [_]Case{
.kind = .sym_link,
},
},
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/gnu-long-nul.tar"),
.files = &[_]Case.File{
.{
@@ -217,8 +249,8 @@ const cases = [_]Case{
.mode = 0o644,
},
},
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/gnu-utf8.tar"),
.files = &[_]Case.File{
.{
@@ -226,8 +258,8 @@ const cases = [_]Case{
.mode = 0o644,
},
},
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/gnu-not-utf8.tar"),
.files = &[_]Case.File{
.{
@@ -235,33 +267,33 @@ const cases = [_]Case{
.mode = 0o644,
},
},
- },
- .{
+ });
+ try testCase(.{
// null in pax key
.data = @embedFile("testdata/pax-nul-xattrs.tar"),
.err = error.PaxNullInKeyword,
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/pax-nul-path.tar"),
.err = error.PaxNullInValue,
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/neg-size.tar"),
.err = error.TarHeader,
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/issue10968.tar"),
.err = error.TarHeader,
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/issue11169.tar"),
.err = error.TarHeader,
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/issue12435.tar"),
.err = error.TarHeaderChksum,
- },
- .{
+ });
+ try testCase(.{
// has magic with space at end instead of null
.data = @embedFile("testdata/invalid-go17.tar"),
.files = &[_]Case.File{
@@ -269,8 +301,8 @@ const cases = [_]Case{
.name = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/foo",
},
},
- },
- .{
+ });
+ try testCase(.{
.data = @embedFile("testdata/ustar-file-devs.tar"),
.files = &[_]Case.File{
.{
@@ -278,17 +310,9 @@ const cases = [_]Case{
.mode = 0o644,
},
},
- },
- .{
- .data = @embedFile("testdata/trailing-slash.tar"),
- .files = &[_]Case.File{
- .{
- .name = "123456789/" ** 30,
- .kind = .directory,
- },
- },
- },
- .{
+ });
+ try testCase(trailing_slash_case);
+ try testCase(.{
// Has size in gnu extended format. To represent size bigger than 8 GB.
.data = @embedFile("testdata/writer-big.tar"),
.files = &[_]Case.File{
@@ -299,120 +323,92 @@ const cases = [_]Case{
.mode = 0o640,
},
},
- },
- .{
- // Size in gnu extended format, and name in pax attribute.
- .data = @embedFile("testdata/writer-big-long.tar"),
- .files = &[_]Case.File{
- .{
- .name = "longname/" ** 15 ++ "16gig.txt",
- .size = 16 * 1024 * 1024 * 1024,
- .mode = 0o644,
- .truncated = true,
- },
- },
- },
- .{
- .data = @embedFile("testdata/fuzz1.tar"),
- .err = error.TarInsufficientBuffer,
- },
- .{
+ });
+ try testCase(writer_big_long_case);
+ try testCase(fuzz1_case);
+ try testCase(.{
.data = @embedFile("testdata/fuzz2.tar"),
.err = error.PaxSizeAttrOverflow,
- },
-};
-
-// used in test to calculate file chksum
-const Md5Writer = struct {
- h: std.crypto.hash.Md5 = std.crypto.hash.Md5.init(.{}),
-
- pub fn writeAll(self: *Md5Writer, buf: []const u8) !void {
- self.h.update(buf);
- }
-
- pub fn writeByte(self: *Md5Writer, byte: u8) !void {
- self.h.update(&[_]u8{byte});
- }
-
- pub fn chksum(self: *Md5Writer) [32]u8 {
- var s = [_]u8{0} ** 16;
- self.h.final(&s);
- return std.fmt.bytesToHex(s, .lower);
- }
-};
+ });
+}
-test "run test cases" {
+fn testCase(case: Case) !void {
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
- for (cases) |case| {
- var fsb = std.io.fixedBufferStream(case.data);
- var iter = tar.iterator(fsb.reader(), .{
- .file_name_buffer = &file_name_buffer,
- .link_name_buffer = &link_name_buffer,
- });
- var i: usize = 0;
- while (iter.next() catch |err| {
- if (case.err) |e| {
- try testing.expectEqual(e, err);
- continue;
- } else {
- return err;
- }
- }) |actual| : (i += 1) {
- const expected = case.files[i];
- try testing.expectEqualStrings(expected.name, actual.name);
- try testing.expectEqual(expected.size, actual.size);
- try testing.expectEqual(expected.kind, actual.kind);
- try testing.expectEqual(expected.mode, actual.mode);
- try testing.expectEqualStrings(expected.link_name, actual.link_name);
+ var br: std.io.Reader = .fixed(case.data);
+ var it: tar.Iterator = .init(&br, .{
+ .file_name_buffer = &file_name_buffer,
+ .link_name_buffer = &link_name_buffer,
+ });
+ var i: usize = 0;
+ while (it.next() catch |err| {
+ if (case.err) |e| {
+ try testing.expectEqual(e, err);
+ return;
+ } else {
+ return err;
+ }
+ }) |actual| : (i += 1) {
+ const expected = case.files[i];
+ try testing.expectEqualStrings(expected.name, actual.name);
+ try testing.expectEqual(expected.size, actual.size);
+ try testing.expectEqual(expected.kind, actual.kind);
+ try testing.expectEqual(expected.mode, actual.mode);
+ try testing.expectEqualStrings(expected.link_name, actual.link_name);
- if (case.chksums.len > i) {
- var md5writer = Md5Writer{};
- try actual.writeAll(&md5writer);
- const chksum = md5writer.chksum();
- try testing.expectEqualStrings(case.chksums[i], &chksum);
- } else {
- if (expected.truncated) {
- iter.unread_file_bytes = 0;
- }
+ if (case.chksums.len > i) {
+ var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
+ defer aw.deinit();
+ try it.streamRemaining(actual, &aw.writer);
+ const chksum = std.fmt.bytesToHex(std.crypto.hash.Md5.hashResult(aw.getWritten()), .lower);
+ try testing.expectEqualStrings(case.chksums[i], &chksum);
+ } else {
+ if (expected.truncated) {
+ it.unread_file_bytes = 0;
}
}
- try testing.expectEqual(case.files.len, i);
}
+ try testing.expectEqual(case.files.len, i);
}
test "pax/gnu long names with small buffer" {
+ try testLongNameCase(gnu_multi_headers_case);
+ try testLongNameCase(trailing_slash_case);
+ try testLongNameCase(.{
+ .data = @embedFile("testdata/fuzz1.tar"),
+ .err = error.TarInsufficientBuffer,
+ });
+}
+
+fn testLongNameCase(case: Case) !void {
// should fail with insufficient buffer error
var min_file_name_buffer: [256]u8 = undefined;
var min_link_name_buffer: [100]u8 = undefined;
- const long_name_cases = [_]Case{ cases[11], cases[25], cases[28] };
- for (long_name_cases) |case| {
- var fsb = std.io.fixedBufferStream(case.data);
- var iter = tar.iterator(fsb.reader(), .{
- .file_name_buffer = &min_file_name_buffer,
- .link_name_buffer = &min_link_name_buffer,
- });
+ var br: std.io.Reader = .fixed(case.data);
+ var iter: tar.Iterator = .init(&br, .{
+ .file_name_buffer = &min_file_name_buffer,
+ .link_name_buffer = &min_link_name_buffer,
+ });
- var iter_err: ?anyerror = null;
- while (iter.next() catch |err| brk: {
- iter_err = err;
- break :brk null;
- }) |_| {}
+ var iter_err: ?anyerror = null;
+ while (iter.next() catch |err| brk: {
+ iter_err = err;
+ break :brk null;
+ }) |_| {}
- try testing.expect(iter_err != null);
- try testing.expectEqual(error.TarInsufficientBuffer, iter_err.?);
- }
+ try testing.expect(iter_err != null);
+ try testing.expectEqual(error.TarInsufficientBuffer, iter_err.?);
}
test "insufficient buffer in Header name filed" {
var min_file_name_buffer: [9]u8 = undefined;
var min_link_name_buffer: [100]u8 = undefined;
- var fsb = std.io.fixedBufferStream(cases[0].data);
- var iter = tar.iterator(fsb.reader(), .{
+ var br: std.io.Reader = .fixed(gnu_case.data);
+ var iter: tar.Iterator = .init(&br, .{
.file_name_buffer = &min_file_name_buffer,
.link_name_buffer = &min_link_name_buffer,
});
@@ -466,21 +462,21 @@ test "should not overwrite existing file" {
// This ensures that file is not overwritten.
//
const data = @embedFile("testdata/overwrite_file.tar");
- var fsb = std.io.fixedBufferStream(data);
+ var r: std.io.Reader = .fixed(data);
// Unpack with strip_components = 1 should fail
var root = std.testing.tmpDir(.{});
defer root.cleanup();
try testing.expectError(
error.PathAlreadyExists,
- tar.pipeToFileSystem(root.dir, fsb.reader(), .{ .mode_mode = .ignore, .strip_components = 1 }),
+ tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }),
);
// Unpack with strip_components = 0 should pass
- fsb.reset();
+ r = .fixed(data);
var root2 = std.testing.tmpDir(.{});
defer root2.cleanup();
- try tar.pipeToFileSystem(root2.dir, fsb.reader(), .{ .mode_mode = .ignore, .strip_components = 0 });
+ try tar.pipeToFileSystem(root2.dir, &r, .{ .mode_mode = .ignore, .strip_components = 0 });
}
test "case sensitivity" {
@@ -494,12 +490,12 @@ test "case sensitivity" {
// 18089/alacritty/Darkermatrix.yml
//
const data = @embedFile("testdata/18089.tar");
- var fsb = std.io.fixedBufferStream(data);
+ var r: std.io.Reader = .fixed(data);
var root = std.testing.tmpDir(.{});
defer root.cleanup();
- tar.pipeToFileSystem(root.dir, fsb.reader(), .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
+ tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
// on case insensitive fs we fail on overwrite existing file
try testing.expectEqual(error.PathAlreadyExists, err);
return;
diff --git a/lib/std/tar/writer.zig b/lib/std/tar/writer.zig
deleted file mode 100644
index 4ced287eec..0000000000
--- a/lib/std/tar/writer.zig
+++ /dev/null
@@ -1,497 +0,0 @@
-const std = @import("std");
-const assert = std.debug.assert;
-const testing = std.testing;
-
-/// Creates tar Writer which will write tar content to the `underlying_writer`.
-/// Use setRoot to nest all following entries under single root. If file don't
-/// fit into posix header (name+prefix: 100+155 bytes) gnu extented header will
-/// be used for long names. Options enables setting file premission mode and
-/// mtime. Default is to use current time for mtime and 0o664 for file mode.
-pub fn writer(underlying_writer: anytype) Writer(@TypeOf(underlying_writer)) {
- return .{ .underlying_writer = underlying_writer };
-}
-
-pub fn Writer(comptime WriterType: type) type {
- return struct {
- const block_size = @sizeOf(Header);
- const empty_block: [block_size]u8 = [_]u8{0} ** block_size;
-
- /// Options for writing file/dir/link. If left empty 0o664 is used for
- /// file mode and current time for mtime.
- pub const Options = struct {
- /// File system permission mode.
- mode: u32 = 0,
- /// File system modification time.
- mtime: u64 = 0,
- };
- const Self = @This();
-
- underlying_writer: WriterType,
- prefix: []const u8 = "",
- mtime_now: u64 = 0,
-
- /// Sets prefix for all other write* method paths.
- pub fn setRoot(self: *Self, root: []const u8) !void {
- if (root.len > 0)
- try self.writeDir(root, .{});
-
- self.prefix = root;
- }
-
- /// Writes directory.
- pub fn writeDir(self: *Self, sub_path: []const u8, opt: Options) !void {
- try self.writeHeader(.directory, sub_path, "", 0, opt);
- }
-
- /// Writes file system file.
- pub fn writeFile(self: *Self, sub_path: []const u8, file: std.fs.File) !void {
- const stat = try file.stat();
- const mtime: u64 = @intCast(@divFloor(stat.mtime, std.time.ns_per_s));
-
- var header = Header{};
- try self.setPath(&header, sub_path);
- try header.setSize(stat.size);
- try header.setMtime(mtime);
- try header.write(self.underlying_writer);
-
- try self.underlying_writer.writeFile(file);
- try self.writePadding(stat.size);
- }
-
- /// Writes file reading file content from `reader`. Number of bytes in
- /// reader must be equal to `size`.
- pub fn writeFileStream(self: *Self, sub_path: []const u8, size: usize, reader: anytype, opt: Options) !void {
- try self.writeHeader(.regular, sub_path, "", @intCast(size), opt);
-
- var counting_reader = std.io.countingReader(reader);
- var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
- try fifo.pump(counting_reader.reader(), self.underlying_writer);
- if (counting_reader.bytes_read != size) return error.WrongReaderSize;
- try self.writePadding(size);
- }
-
- /// Writes file using bytes buffer `content` for size and file content.
- pub fn writeFileBytes(self: *Self, sub_path: []const u8, content: []const u8, opt: Options) !void {
- try self.writeHeader(.regular, sub_path, "", @intCast(content.len), opt);
- try self.underlying_writer.writeAll(content);
- try self.writePadding(content.len);
- }
-
- /// Writes symlink.
- pub fn writeLink(self: *Self, sub_path: []const u8, link_name: []const u8, opt: Options) !void {
- try self.writeHeader(.symbolic_link, sub_path, link_name, 0, opt);
- }
-
- /// Writes fs.Dir.WalkerEntry. Uses `mtime` from file system entry and
- /// default for entry mode .
- pub fn writeEntry(self: *Self, entry: std.fs.Dir.Walker.Entry) !void {
- switch (entry.kind) {
- .directory => {
- try self.writeDir(entry.path, .{ .mtime = try entryMtime(entry) });
- },
- .file => {
- var file = try entry.dir.openFile(entry.basename, .{});
- defer file.close();
- try self.writeFile(entry.path, file);
- },
- .sym_link => {
- var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
- const link_name = try entry.dir.readLink(entry.basename, &link_name_buffer);
- try self.writeLink(entry.path, link_name, .{ .mtime = try entryMtime(entry) });
- },
- else => {
- return error.UnsupportedWalkerEntryKind;
- },
- }
- }
-
- fn writeHeader(
- self: *Self,
- typeflag: Header.FileType,
- sub_path: []const u8,
- link_name: []const u8,
- size: u64,
- opt: Options,
- ) !void {
- var header = Header.init(typeflag);
- try self.setPath(&header, sub_path);
- try header.setSize(size);
- try header.setMtime(if (opt.mtime != 0) opt.mtime else self.mtimeNow());
- if (opt.mode != 0)
- try header.setMode(opt.mode);
- if (typeflag == .symbolic_link)
- header.setLinkname(link_name) catch |err| switch (err) {
- error.NameTooLong => try self.writeExtendedHeader(.gnu_long_link, &.{link_name}),
- else => return err,
- };
- try header.write(self.underlying_writer);
- }
-
- fn mtimeNow(self: *Self) u64 {
- if (self.mtime_now == 0)
- self.mtime_now = @intCast(std.time.timestamp());
- return self.mtime_now;
- }
-
- fn entryMtime(entry: std.fs.Dir.Walker.Entry) !u64 {
- const stat = try entry.dir.statFile(entry.basename);
- return @intCast(@divFloor(stat.mtime, std.time.ns_per_s));
- }
-
- /// Writes path in posix header, if don't fit (in name+prefix; 100+155
- /// bytes) writes it in gnu extended header.
- fn setPath(self: *Self, header: *Header, sub_path: []const u8) !void {
- header.setPath(self.prefix, sub_path) catch |err| switch (err) {
- error.NameTooLong => {
- // write extended header
- const buffers: []const []const u8 = if (self.prefix.len == 0)
- &.{sub_path}
- else
- &.{ self.prefix, "/", sub_path };
- try self.writeExtendedHeader(.gnu_long_name, buffers);
- },
- else => return err,
- };
- }
-
- /// Writes gnu extended header: gnu_long_name or gnu_long_link.
- fn writeExtendedHeader(self: *Self, typeflag: Header.FileType, buffers: []const []const u8) !void {
- var len: usize = 0;
- for (buffers) |buf|
- len += buf.len;
-
- var header = Header.init(typeflag);
- try header.setSize(len);
- try header.write(self.underlying_writer);
- for (buffers) |buf|
- try self.underlying_writer.writeAll(buf);
- try self.writePadding(len);
- }
-
- fn writePadding(self: *Self, bytes: u64) !void {
- const pos: usize = @intCast(bytes % block_size);
- if (pos == 0) return;
- try self.underlying_writer.writeAll(empty_block[pos..]);
- }
-
- /// Tar should finish with two zero blocks, but 'reasonable system must
- /// not assume that such a block exists when reading an archive' (from
- /// reference). In practice it is safe to skip this finish.
- pub fn finish(self: *Self) !void {
- try self.underlying_writer.writeAll(&empty_block);
- try self.underlying_writer.writeAll(&empty_block);
- }
- };
-}
-
-/// A struct that is exactly 512 bytes and matches tar file format. This is
-/// intended to be used for outputting tar files; for parsing there is
-/// `std.tar.Header`.
-const Header = extern struct {
- // This struct was originally copied from
- // https://github.com/mattnite/tar/blob/main/src/main.zig which is MIT
- // licensed.
- //
- // The name, linkname, magic, uname, and gname are null-terminated character
- // strings. All other fields are zero-filled octal numbers in ASCII. Each
- // numeric field of width w contains w minus 1 digits, and a null.
- // Reference: https://www.gnu.org/software/tar/manual/html_node/Standard.html
- // POSIX header: byte offset
- name: [100]u8 = [_]u8{0} ** 100, // 0
- mode: [7:0]u8 = default_mode.file, // 100
- uid: [7:0]u8 = [_:0]u8{0} ** 7, // unused 108
- gid: [7:0]u8 = [_:0]u8{0} ** 7, // unused 116
- size: [11:0]u8 = [_:0]u8{'0'} ** 11, // 124
- mtime: [11:0]u8 = [_:0]u8{'0'} ** 11, // 136
- checksum: [7:0]u8 = [_:0]u8{' '} ** 7, // 148
- typeflag: FileType = .regular, // 156
- linkname: [100]u8 = [_]u8{0} ** 100, // 157
- magic: [6]u8 = [_]u8{ 'u', 's', 't', 'a', 'r', 0 }, // 257
- version: [2]u8 = [_]u8{ '0', '0' }, // 263
- uname: [32]u8 = [_]u8{0} ** 32, // unused 265
- gname: [32]u8 = [_]u8{0} ** 32, // unused 297
- devmajor: [7:0]u8 = [_:0]u8{0} ** 7, // unused 329
- devminor: [7:0]u8 = [_:0]u8{0} ** 7, // unused 337
- prefix: [155]u8 = [_]u8{0} ** 155, // 345
- pad: [12]u8 = [_]u8{0} ** 12, // unused 500
-
- pub const FileType = enum(u8) {
- regular = '0',
- symbolic_link = '2',
- directory = '5',
- gnu_long_name = 'L',
- gnu_long_link = 'K',
- };
-
- const default_mode = struct {
- const file = [_:0]u8{ '0', '0', '0', '0', '6', '6', '4' }; // 0o664
- const dir = [_:0]u8{ '0', '0', '0', '0', '7', '7', '5' }; // 0o775
- const sym_link = [_:0]u8{ '0', '0', '0', '0', '7', '7', '7' }; // 0o777
- const other = [_:0]u8{ '0', '0', '0', '0', '0', '0', '0' }; // 0o000
- };
-
- pub fn init(typeflag: FileType) Header {
- return .{
- .typeflag = typeflag,
- .mode = switch (typeflag) {
- .directory => default_mode.dir,
- .symbolic_link => default_mode.sym_link,
- .regular => default_mode.file,
- else => default_mode.other,
- },
- };
- }
-
- pub fn setSize(self: *Header, size: u64) !void {
- try octal(&self.size, size);
- }
-
- fn octal(buf: []u8, value: u64) !void {
- var remainder: u64 = value;
- var pos: usize = buf.len;
- while (remainder > 0 and pos > 0) {
- pos -= 1;
- const c: u8 = @as(u8, @intCast(remainder % 8)) + '0';
- buf[pos] = c;
- remainder /= 8;
- if (pos == 0 and remainder > 0) return error.OctalOverflow;
- }
- }
-
- pub fn setMode(self: *Header, mode: u32) !void {
- try octal(&self.mode, mode);
- }
-
- // Integer number of seconds since January 1, 1970, 00:00 Coordinated Universal Time.
- // mtime == 0 will use current time
- pub fn setMtime(self: *Header, mtime: u64) !void {
- try octal(&self.mtime, mtime);
- }
-
- pub fn updateChecksum(self: *Header) !void {
- var checksum: usize = ' '; // other 7 self.checksum bytes are initialized to ' '
- for (std.mem.asBytes(self)) |val|
- checksum += val;
- try octal(&self.checksum, checksum);
- }
-
- pub fn write(self: *Header, output_writer: anytype) !void {
- try self.updateChecksum();
- try output_writer.writeAll(std.mem.asBytes(self));
- }
-
- pub fn setLinkname(self: *Header, link: []const u8) !void {
- if (link.len > self.linkname.len) return error.NameTooLong;
- @memcpy(self.linkname[0..link.len], link);
- }
-
- pub fn setPath(self: *Header, prefix: []const u8, sub_path: []const u8) !void {
- const max_prefix = self.prefix.len;
- const max_name = self.name.len;
- const sep = std.fs.path.sep_posix;
-
- if (prefix.len + sub_path.len > max_name + max_prefix or prefix.len > max_prefix)
- return error.NameTooLong;
-
- // both fit into name
- if (prefix.len > 0 and prefix.len + sub_path.len < max_name) {
- @memcpy(self.name[0..prefix.len], prefix);
- self.name[prefix.len] = sep;
- @memcpy(self.name[prefix.len + 1 ..][0..sub_path.len], sub_path);
- return;
- }
-
- // sub_path fits into name
- // there is no prefix or prefix fits into prefix
- if (sub_path.len <= max_name) {
- @memcpy(self.name[0..sub_path.len], sub_path);
- @memcpy(self.prefix[0..prefix.len], prefix);
- return;
- }
-
- if (prefix.len > 0) {
- @memcpy(self.prefix[0..prefix.len], prefix);
- self.prefix[prefix.len] = sep;
- }
- const prefix_pos = if (prefix.len > 0) prefix.len + 1 else 0;
-
- // add as much to prefix as you can, must split at /
- const prefix_remaining = max_prefix - prefix_pos;
- if (std.mem.lastIndexOf(u8, sub_path[0..@min(prefix_remaining, sub_path.len)], &.{'/'})) |sep_pos| {
- @memcpy(self.prefix[prefix_pos..][0..sep_pos], sub_path[0..sep_pos]);
- if ((sub_path.len - sep_pos - 1) > max_name) return error.NameTooLong;
- @memcpy(self.name[0..][0 .. sub_path.len - sep_pos - 1], sub_path[sep_pos + 1 ..]);
- return;
- }
-
- return error.NameTooLong;
- }
-
- comptime {
- assert(@sizeOf(Header) == 512);
- }
-
- test setPath {
- const cases = [_]struct {
- in: []const []const u8,
- out: []const []const u8,
- }{
- .{
- .in = &.{ "", "123456789" },
- .out = &.{ "", "123456789" },
- },
- // can fit into name
- .{
- .in = &.{ "prefix", "sub_path" },
- .out = &.{ "", "prefix/sub_path" },
- },
- // no more both fits into name
- .{
- .in = &.{ "prefix", "0123456789/" ** 8 ++ "basename" },
- .out = &.{ "prefix", "0123456789/" ** 8 ++ "basename" },
- },
- // put as much as you can into prefix the rest goes into name
- .{
- .in = &.{ "prefix", "0123456789/" ** 10 ++ "basename" },
- .out = &.{ "prefix/" ++ "0123456789/" ** 9 ++ "0123456789", "basename" },
- },
-
- .{
- .in = &.{ "prefix", "0123456789/" ** 15 ++ "basename" },
- .out = &.{ "prefix/" ++ "0123456789/" ** 12 ++ "0123456789", "0123456789/0123456789/basename" },
- },
- .{
- .in = &.{ "prefix", "0123456789/" ** 21 ++ "basename" },
- .out = &.{ "prefix/" ++ "0123456789/" ** 12 ++ "0123456789", "0123456789/" ** 8 ++ "basename" },
- },
- .{
- .in = &.{ "", "012345678/" ** 10 ++ "foo" },
- .out = &.{ "012345678/" ** 9 ++ "012345678", "foo" },
- },
- };
-
- for (cases) |case| {
- var header = Header.init(.regular);
- try header.setPath(case.in[0], case.in[1]);
- try testing.expectEqualStrings(case.out[0], str(&header.prefix));
- try testing.expectEqualStrings(case.out[1], str(&header.name));
- }
-
- const error_cases = [_]struct {
- in: []const []const u8,
- }{
- // basename can't fit into name (106 characters)
- .{ .in = &.{ "zig", "test/cases/compile_errors/regression_test_2980_base_type_u32_is_not_type_checked_properly_when_assigning_a_value_within_a_struct.zig" } },
- // cant fit into 255 + sep
- .{ .in = &.{ "prefix", "0123456789/" ** 22 ++ "basename" } },
- // can fit but sub_path can't be split (there is no separator)
- .{ .in = &.{ "prefix", "0123456789" ** 10 ++ "a" } },
- .{ .in = &.{ "prefix", "0123456789" ** 14 ++ "basename" } },
- };
-
- for (error_cases) |case| {
- var header = Header.init(.regular);
- try testing.expectError(
- error.NameTooLong,
- header.setPath(case.in[0], case.in[1]),
- );
- }
- }
-
- // Breaks string on first null character.
- fn str(s: []const u8) []const u8 {
- for (s, 0..) |c, i| {
- if (c == 0) return s[0..i];
- }
- return s;
- }
-};
-
-test {
- _ = Header;
-}
-
-test "write files" {
- const files = [_]struct {
- path: []const u8,
- content: []const u8,
- }{
- .{ .path = "foo", .content = "bar" },
- .{ .path = "a12345678/" ** 10 ++ "foo", .content = "a" ** 511 },
- .{ .path = "b12345678/" ** 24 ++ "foo", .content = "b" ** 512 },
- .{ .path = "c12345678/" ** 25 ++ "foo", .content = "c" ** 513 },
- .{ .path = "d12345678/" ** 51 ++ "foo", .content = "d" ** 1025 },
- .{ .path = "e123456789" ** 11, .content = "e" },
- };
-
- var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
- var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
-
- // with root
- {
- const root = "root";
-
- var output = std.ArrayList(u8).init(testing.allocator);
- defer output.deinit();
- var wrt = writer(output.writer());
- try wrt.setRoot(root);
- for (files) |file|
- try wrt.writeFileBytes(file.path, file.content, .{});
-
- var input = std.io.fixedBufferStream(output.items);
- var iter = std.tar.iterator(
- input.reader(),
- .{ .file_name_buffer = &file_name_buffer, .link_name_buffer = &link_name_buffer },
- );
-
- // first entry is directory with prefix
- {
- const actual = (try iter.next()).?;
- try testing.expectEqualStrings(root, actual.name);
- try testing.expectEqual(std.tar.FileKind.directory, actual.kind);
- }
-
- var i: usize = 0;
- while (try iter.next()) |actual| {
- defer i += 1;
- const expected = files[i];
- try testing.expectEqualStrings(root, actual.name[0..root.len]);
- try testing.expectEqual('/', actual.name[root.len..][0]);
- try testing.expectEqualStrings(expected.path, actual.name[root.len + 1 ..]);
-
- var content = std.ArrayList(u8).init(testing.allocator);
- defer content.deinit();
- try actual.writeAll(content.writer());
- try testing.expectEqualSlices(u8, expected.content, content.items);
- }
- }
- // without root
- {
- var output = std.ArrayList(u8).init(testing.allocator);
- defer output.deinit();
- var wrt = writer(output.writer());
- for (files) |file| {
- var content = std.io.fixedBufferStream(file.content);
- try wrt.writeFileStream(file.path, file.content.len, content.reader(), .{});
- }
-
- var input = std.io.fixedBufferStream(output.items);
- var iter = std.tar.iterator(
- input.reader(),
- .{ .file_name_buffer = &file_name_buffer, .link_name_buffer = &link_name_buffer },
- );
-
- var i: usize = 0;
- while (try iter.next()) |actual| {
- defer i += 1;
- const expected = files[i];
- try testing.expectEqualStrings(expected.path, actual.name);
-
- var content = std.ArrayList(u8).init(testing.allocator);
- defer content.deinit();
- try actual.writeAll(content.writer());
- try testing.expectEqualSlices(u8, expected.content, content.items);
- }
- try wrt.finish();
- }
-}
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index f9027a4f47..e80e961b13 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -33,6 +33,7 @@ pub var log_level = std.log.Level.warn;
// Disable printing in tests for simple backends.
pub const backend_can_print = switch (builtin.zig_backend) {
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
.stage2_spirv,
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index 486947768d..a692a63795 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -321,6 +321,27 @@ pub const BuildId = union(enum) {
try std.testing.expectError(error.InvalidCharacter, parse("0xfoobbb"));
try std.testing.expectError(error.InvalidBuildIdStyle, parse("yaddaxxx"));
}
+
+ pub fn format(id: BuildId, writer: *std.io.Writer) std.io.Writer.Error!void {
+ switch (id) {
+ .none, .fast, .uuid, .sha1, .md5 => {
+ try writer.writeAll(@tagName(id));
+ },
+ .hexstring => |hs| {
+ try writer.print("0x{x}", .{hs.toSlice()});
+ },
+ }
+ }
+
+ test format {
+ try std.testing.expectFmt("none", "{f}", .{@as(BuildId, .none)});
+ try std.testing.expectFmt("fast", "{f}", .{@as(BuildId, .fast)});
+ try std.testing.expectFmt("uuid", "{f}", .{@as(BuildId, .uuid)});
+ try std.testing.expectFmt("sha1", "{f}", .{@as(BuildId, .sha1)});
+ try std.testing.expectFmt("md5", "{f}", .{@as(BuildId, .md5)});
+ try std.testing.expectFmt("0x", "{f}", .{BuildId.initHexString("")});
+ try std.testing.expectFmt("0x1234cdef", "{f}", .{BuildId.initHexString("\x12\x34\xcd\xef")});
+ }
};
pub const LtoMode = enum { none, full, thin };
@@ -364,23 +385,23 @@ pub fn serializeCpuAlloc(ally: Allocator, cpu: std.Target.Cpu) Allocator.Error![
/// Return a Formatter for a Zig identifier, escaping it with `@""` syntax if needed.
///
/// See also `fmtIdFlags`.
-pub fn fmtId(bytes: []const u8) std.fmt.Formatter(FormatId, FormatId.render) {
- return .{ .data = .{ .bytes = bytes, .flags = .{} } };
+pub fn fmtId(bytes: []const u8) FormatId {
+ return .{ .bytes = bytes, .flags = .{} };
}
/// Return a Formatter for a Zig identifier, escaping it with `@""` syntax if needed.
///
/// See also `fmtId`.
-pub fn fmtIdFlags(bytes: []const u8, flags: FormatId.Flags) std.fmt.Formatter(FormatId, FormatId.render) {
- return .{ .data = .{ .bytes = bytes, .flags = flags } };
+pub fn fmtIdFlags(bytes: []const u8, flags: FormatId.Flags) FormatId {
+ return .{ .bytes = bytes, .flags = flags };
}
-pub fn fmtIdPU(bytes: []const u8) std.fmt.Formatter(FormatId, FormatId.render) {
- return .{ .data = .{ .bytes = bytes, .flags = .{ .allow_primitive = true, .allow_underscore = true } } };
+pub fn fmtIdPU(bytes: []const u8) FormatId {
+ return .{ .bytes = bytes, .flags = .{ .allow_primitive = true, .allow_underscore = true } };
}
-pub fn fmtIdP(bytes: []const u8) std.fmt.Formatter(FormatId, FormatId.render) {
- return .{ .data = .{ .bytes = bytes, .flags = .{ .allow_primitive = true } } };
+pub fn fmtIdP(bytes: []const u8) FormatId {
+ return .{ .bytes = bytes, .flags = .{ .allow_primitive = true } };
}
test fmtId {
@@ -426,7 +447,7 @@ pub const FormatId = struct {
};
/// Print the string as a Zig identifier, escaping it with `@""` syntax if needed.
- fn render(ctx: FormatId, writer: *Writer) Writer.Error!void {
+ pub fn format(ctx: FormatId, writer: *Writer) Writer.Error!void {
const bytes = ctx.bytes;
if (isValidId(bytes) and
(ctx.flags.allow_primitive or !std.zig.isPrimitive(bytes)) and
diff --git a/src/Builtin.zig b/src/Builtin.zig
index b2cb603f53..b4e05a6089 100644
--- a/src/Builtin.zig
+++ b/src/Builtin.zig
@@ -342,9 +342,9 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
}
// `make_path` matters because the dir hasn't actually been created yet.
- var af = try root_dir.atomicFile(sub_path, .{ .make_path = true });
+ var af = try root_dir.atomicFile(sub_path, .{ .make_path = true, .write_buffer = &.{} });
defer af.deinit();
- try af.file.writeAll(file.source.?);
+ try af.file_writer.interface.writeAll(file.source.?);
af.finish() catch |err| switch (err) {
error.AccessDenied => switch (builtin.os.tag) {
.windows => {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index b5597017c4..3796ed6acc 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1816,10 +1816,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (options.skip_linker_dependencies) break :s .none;
const want = options.want_compiler_rt orelse is_exe_or_dyn_lib;
if (!want) break :s .none;
- if (have_zcu) {
+ if (have_zcu and target_util.canBuildLibCompilerRt(target, use_llvm, build_options.have_llvm and use_llvm)) {
if (output_mode == .Obj) break :s .zcu;
- if (target.ofmt == .coff and target_util.zigBackend(target, use_llvm) == .stage2_x86_64)
- break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
+ if (switch (target_util.zigBackend(target, use_llvm)) {
+ else => false,
+ .stage2_aarch64, .stage2_x86_64 => target.ofmt == .coff,
+ }) break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
}
if (is_exe_or_dyn_lib) break :s .lib;
break :s .obj;
@@ -1850,11 +1852,11 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// approach, since the ubsan runtime uses quite a lot of the standard library
// and this reduces unnecessary bloat.
const ubsan_rt_strat: RtStrat = s: {
- const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target);
+ const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target, use_llvm, build_options.have_llvm);
const want_ubsan_rt = options.want_ubsan_rt orelse (can_build_ubsan_rt and any_sanitize_c == .full and is_exe_or_dyn_lib);
if (!want_ubsan_rt) break :s .none;
if (options.skip_linker_dependencies) break :s .none;
- if (have_zcu) break :s .zcu;
+ if (have_zcu and target_util.canBuildLibUbsanRt(target, use_llvm, build_options.have_llvm and use_llvm)) break :s .zcu;
if (is_exe_or_dyn_lib) break :s .lib;
break :s .obj;
};
@@ -3382,7 +3384,7 @@ pub fn saveState(comp: *Compilation) !void {
const gpa = comp.gpa;
- var bufs = std.ArrayList(std.posix.iovec_const).init(gpa);
+ var bufs = std.ArrayList([]const u8).init(gpa);
defer bufs.deinit();
var pt_headers = std.ArrayList(Header.PerThread).init(gpa);
@@ -3421,50 +3423,50 @@ pub fn saveState(comp: *Compilation) !void {
try bufs.ensureTotalCapacityPrecise(14 + 8 * pt_headers.items.len);
addBuf(&bufs, mem.asBytes(&header));
- addBuf(&bufs, mem.sliceAsBytes(pt_headers.items));
-
- addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.values()));
-
- addBuf(&bufs, mem.sliceAsBytes(ip.first_dependency.keys()));
- addBuf(&bufs, mem.sliceAsBytes(ip.first_dependency.values()));
- addBuf(&bufs, mem.sliceAsBytes(ip.dep_entries.items));
- addBuf(&bufs, mem.sliceAsBytes(ip.free_dep_entries.items));
+ addBuf(&bufs, @ptrCast(pt_headers.items));
+
+ addBuf(&bufs, @ptrCast(ip.src_hash_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.src_hash_deps.values()));
+ addBuf(&bufs, @ptrCast(ip.nav_val_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.nav_val_deps.values()));
+ addBuf(&bufs, @ptrCast(ip.nav_ty_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.nav_ty_deps.values()));
+ addBuf(&bufs, @ptrCast(ip.interned_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.interned_deps.values()));
+ addBuf(&bufs, @ptrCast(ip.zon_file_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.zon_file_deps.values()));
+ addBuf(&bufs, @ptrCast(ip.embed_file_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.embed_file_deps.values()));
+ addBuf(&bufs, @ptrCast(ip.namespace_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.namespace_deps.values()));
+ addBuf(&bufs, @ptrCast(ip.namespace_name_deps.keys()));
+ addBuf(&bufs, @ptrCast(ip.namespace_name_deps.values()));
+
+ addBuf(&bufs, @ptrCast(ip.first_dependency.keys()));
+ addBuf(&bufs, @ptrCast(ip.first_dependency.values()));
+ addBuf(&bufs, @ptrCast(ip.dep_entries.items));
+ addBuf(&bufs, @ptrCast(ip.free_dep_entries.items));
for (ip.locals, pt_headers.items) |*local, pt_header| {
if (pt_header.intern_pool.limbs_len > 0) {
- addBuf(&bufs, mem.sliceAsBytes(local.shared.limbs.view().items(.@"0")[0..pt_header.intern_pool.limbs_len]));
+ addBuf(&bufs, @ptrCast(local.shared.limbs.view().items(.@"0")[0..pt_header.intern_pool.limbs_len]));
}
if (pt_header.intern_pool.extra_len > 0) {
- addBuf(&bufs, mem.sliceAsBytes(local.shared.extra.view().items(.@"0")[0..pt_header.intern_pool.extra_len]));
+ addBuf(&bufs, @ptrCast(local.shared.extra.view().items(.@"0")[0..pt_header.intern_pool.extra_len]));
}
if (pt_header.intern_pool.items_len > 0) {
- addBuf(&bufs, mem.sliceAsBytes(local.shared.items.view().items(.data)[0..pt_header.intern_pool.items_len]));
- addBuf(&bufs, mem.sliceAsBytes(local.shared.items.view().items(.tag)[0..pt_header.intern_pool.items_len]));
+ addBuf(&bufs, @ptrCast(local.shared.items.view().items(.data)[0..pt_header.intern_pool.items_len]));
+ addBuf(&bufs, @ptrCast(local.shared.items.view().items(.tag)[0..pt_header.intern_pool.items_len]));
}
if (pt_header.intern_pool.string_bytes_len > 0) {
addBuf(&bufs, local.shared.strings.view().items(.@"0")[0..pt_header.intern_pool.string_bytes_len]);
}
if (pt_header.intern_pool.tracked_insts_len > 0) {
- addBuf(&bufs, mem.sliceAsBytes(local.shared.tracked_insts.view().items(.@"0")[0..pt_header.intern_pool.tracked_insts_len]));
+ addBuf(&bufs, @ptrCast(local.shared.tracked_insts.view().items(.@"0")[0..pt_header.intern_pool.tracked_insts_len]));
}
if (pt_header.intern_pool.files_len > 0) {
- addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.bin_digest)[0..pt_header.intern_pool.files_len]));
- addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.root_type)[0..pt_header.intern_pool.files_len]));
+ addBuf(&bufs, @ptrCast(local.shared.files.view().items(.bin_digest)[0..pt_header.intern_pool.files_len]));
+ addBuf(&bufs, @ptrCast(local.shared.files.view().items(.root_type)[0..pt_header.intern_pool.files_len]));
}
}
@@ -3482,95 +3484,95 @@ pub fn saveState(comp: *Compilation) !void {
try bufs.ensureUnusedCapacity(85);
addBuf(&bufs, wasm.string_bytes.items);
// TODO make it well-defined memory layout
- //addBuf(&bufs, mem.sliceAsBytes(wasm.objects.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.func_types.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_function_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_function_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_functions.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_global_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_global_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_globals.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_table_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_table_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_tables.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_memory_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_memory_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_memories.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.tag)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.offset)));
+ //addBuf(&bufs, @ptrCast(wasm.objects.items));
+ addBuf(&bufs, @ptrCast(wasm.func_types.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_function_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_function_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.object_functions.items));
+ addBuf(&bufs, @ptrCast(wasm.object_global_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_global_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.object_globals.items));
+ addBuf(&bufs, @ptrCast(wasm.object_table_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_table_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.object_tables.items));
+ addBuf(&bufs, @ptrCast(wasm.object_memory_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_memory_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.object_memories.items));
+ addBuf(&bufs, @ptrCast(wasm.object_relocations.items(.tag)));
+ addBuf(&bufs, @ptrCast(wasm.object_relocations.items(.offset)));
// TODO handle the union safety field
- //addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.pointee)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.addend)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_init_funcs.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_segments.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_datas.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_custom_segments.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_custom_segments.values()));
+ //addBuf(&bufs, @ptrCast(wasm.object_relocations.items(.pointee)));
+ addBuf(&bufs, @ptrCast(wasm.object_relocations.items(.addend)));
+ addBuf(&bufs, @ptrCast(wasm.object_init_funcs.items));
+ addBuf(&bufs, @ptrCast(wasm.object_data_segments.items));
+ addBuf(&bufs, @ptrCast(wasm.object_datas.items));
+ addBuf(&bufs, @ptrCast(wasm.object_data_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_data_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.object_custom_segments.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_custom_segments.values()));
// TODO make it well-defined memory layout
- // addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdats.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations_table.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations_table.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdat_symbols.items(.kind)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdat_symbols.items(.index)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.tag)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.offset)));
+ // addBuf(&bufs, @ptrCast(wasm.object_comdats.items));
+ addBuf(&bufs, @ptrCast(wasm.object_relocations_table.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_relocations_table.values()));
+ addBuf(&bufs, @ptrCast(wasm.object_comdat_symbols.items(.kind)));
+ addBuf(&bufs, @ptrCast(wasm.object_comdat_symbols.items(.index)));
+ addBuf(&bufs, @ptrCast(wasm.out_relocs.items(.tag)));
+ addBuf(&bufs, @ptrCast(wasm.out_relocs.items(.offset)));
// TODO handle the union safety field
- //addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.pointee)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.addend)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.uav_fixups.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.nav_fixups.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.func_table_fixups.items));
+ //addBuf(&bufs, @ptrCast(wasm.out_relocs.items(.pointee)));
+ addBuf(&bufs, @ptrCast(wasm.out_relocs.items(.addend)));
+ addBuf(&bufs, @ptrCast(wasm.uav_fixups.items));
+ addBuf(&bufs, @ptrCast(wasm.nav_fixups.items));
+ addBuf(&bufs, @ptrCast(wasm.func_table_fixups.items));
if (is_obj) {
- addBuf(&bufs, mem.sliceAsBytes(wasm.navs_obj.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.navs_obj.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_obj.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_obj.values()));
+ addBuf(&bufs, @ptrCast(wasm.navs_obj.keys()));
+ addBuf(&bufs, @ptrCast(wasm.navs_obj.values()));
+ addBuf(&bufs, @ptrCast(wasm.uavs_obj.keys()));
+ addBuf(&bufs, @ptrCast(wasm.uavs_obj.values()));
} else {
- addBuf(&bufs, mem.sliceAsBytes(wasm.navs_exe.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.navs_exe.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_exe.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_exe.values()));
+ addBuf(&bufs, @ptrCast(wasm.navs_exe.keys()));
+ addBuf(&bufs, @ptrCast(wasm.navs_exe.values()));
+ addBuf(&bufs, @ptrCast(wasm.uavs_exe.keys()));
+ addBuf(&bufs, @ptrCast(wasm.uavs_exe.values()));
}
- addBuf(&bufs, mem.sliceAsBytes(wasm.overaligned_uavs.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.overaligned_uavs.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_funcs.keys()));
+ addBuf(&bufs, @ptrCast(wasm.overaligned_uavs.keys()));
+ addBuf(&bufs, @ptrCast(wasm.overaligned_uavs.values()));
+ addBuf(&bufs, @ptrCast(wasm.zcu_funcs.keys()));
// TODO handle the union safety field
- // addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_funcs.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.nav_exports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.nav_exports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.uav_exports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.uav_exports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.missing_exports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.function_exports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.function_exports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.hidden_function_exports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.hidden_function_exports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.global_exports.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.functions.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.function_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.function_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.data_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.data_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.data_segments.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.globals.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.global_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.global_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.tables.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.table_imports.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.table_imports.values()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_indirect_function_set.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_indirect_function_import_set.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.object_indirect_function_set.keys()));
- addBuf(&bufs, mem.sliceAsBytes(wasm.mir_instructions.items(.tag)));
+ // addBuf(&bufs, @ptrCast(wasm.zcu_funcs.values()));
+ addBuf(&bufs, @ptrCast(wasm.nav_exports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.nav_exports.values()));
+ addBuf(&bufs, @ptrCast(wasm.uav_exports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.uav_exports.values()));
+ addBuf(&bufs, @ptrCast(wasm.imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.missing_exports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.function_exports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.function_exports.values()));
+ addBuf(&bufs, @ptrCast(wasm.hidden_function_exports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.hidden_function_exports.values()));
+ addBuf(&bufs, @ptrCast(wasm.global_exports.items));
+ addBuf(&bufs, @ptrCast(wasm.functions.keys()));
+ addBuf(&bufs, @ptrCast(wasm.function_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.function_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.data_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.data_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.data_segments.keys()));
+ addBuf(&bufs, @ptrCast(wasm.globals.keys()));
+ addBuf(&bufs, @ptrCast(wasm.global_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.global_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.tables.keys()));
+ addBuf(&bufs, @ptrCast(wasm.table_imports.keys()));
+ addBuf(&bufs, @ptrCast(wasm.table_imports.values()));
+ addBuf(&bufs, @ptrCast(wasm.zcu_indirect_function_set.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_indirect_function_import_set.keys()));
+ addBuf(&bufs, @ptrCast(wasm.object_indirect_function_set.keys()));
+ addBuf(&bufs, @ptrCast(wasm.mir_instructions.items(.tag)));
// TODO handle the union safety field
- //addBuf(&bufs, mem.sliceAsBytes(wasm.mir_instructions.items(.data)));
- addBuf(&bufs, mem.sliceAsBytes(wasm.mir_extra.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.mir_locals.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.tag_name_bytes.items));
- addBuf(&bufs, mem.sliceAsBytes(wasm.tag_name_offs.items));
+ //addBuf(&bufs, @ptrCast(wasm.mir_instructions.items(.data)));
+ addBuf(&bufs, @ptrCast(wasm.mir_extra.items));
+ addBuf(&bufs, @ptrCast(wasm.mir_locals.items));
+ addBuf(&bufs, @ptrCast(wasm.tag_name_bytes.items));
+ addBuf(&bufs, @ptrCast(wasm.tag_name_offs.items));
// TODO add as header fields
// entry_resolution: FunctionImport.Resolution
@@ -3596,16 +3598,16 @@ pub fn saveState(comp: *Compilation) !void {
// Using an atomic file prevents a crash or power failure from corrupting
// the previous incremental compilation state.
- var af = try lf.emit.root_dir.handle.atomicFile(basename, .{});
+ var write_buffer: [1024]u8 = undefined;
+ var af = try lf.emit.root_dir.handle.atomicFile(basename, .{ .write_buffer = &write_buffer });
defer af.deinit();
- try af.file.pwritevAll(bufs.items, 0);
+ try af.file_writer.interface.writeVecAll(bufs.items);
try af.finish();
}
-fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void {
- // Even when len=0, the undefined pointer might cause EFAULT.
+fn addBuf(list: *std.ArrayList([]const u8), buf: []const u8) void {
if (buf.len == 0) return;
- list.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len });
+ list.appendAssumeCapacity(buf);
}
/// This function is temporally single-threaded.
@@ -4862,6 +4864,9 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
};
defer tar_file.close();
+ var buffer: [1024]u8 = undefined;
+ var tar_file_writer = tar_file.writer(&buffer);
+
var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty;
defer seen_table.deinit(comp.gpa);
@@ -4871,32 +4876,45 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
var i: usize = 0;
while (i < seen_table.count()) : (i += 1) {
const mod = seen_table.keys()[i];
- try comp.docsCopyModule(mod, seen_table.values()[i], tar_file);
+ try comp.docsCopyModule(mod, seen_table.values()[i], &tar_file_writer);
const deps = mod.deps.values();
try seen_table.ensureUnusedCapacity(comp.gpa, deps.len);
for (deps) |dep| seen_table.putAssumeCapacity(dep, dep.fully_qualified_name);
}
+
+ tar_file_writer.end() catch |err| {
+ return comp.lockAndSetMiscFailure(
+ .docs_copy,
+ "unable to write '{f}/sources.tar': {t}",
+ .{ docs_path, err },
+ );
+ };
}
-fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8, tar_file: fs.File) !void {
+fn docsCopyModule(
+ comp: *Compilation,
+ module: *Package.Module,
+ name: []const u8,
+ tar_file_writer: *fs.File.Writer,
+) !void {
const root = module.root;
var mod_dir = d: {
const root_dir, const sub_path = root.openInfo(comp.dirs);
break :d root_dir.openDir(sub_path, .{ .iterate = true });
} catch |err| {
- return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{f}': {s}", .{
- root.fmt(comp), @errorName(err),
- });
+ return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{f}': {t}", .{ root.fmt(comp), err });
};
defer mod_dir.close();
var walker = try mod_dir.walk(comp.gpa);
defer walker.deinit();
- var archiver = std.tar.writer(tar_file.deprecatedWriter().any());
+ var archiver: std.tar.Writer = .{ .underlying_writer = &tar_file_writer.interface };
archiver.prefix = name;
+ var buffer: [1024]u8 = undefined;
+
while (try walker.next()) |entry| {
switch (entry.kind) {
.file => {
@@ -4907,14 +4925,17 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8,
else => continue,
}
var file = mod_dir.openFile(entry.path, .{}) catch |err| {
- return comp.lockAndSetMiscFailure(.docs_copy, "unable to open '{f}{s}': {s}", .{
- root.fmt(comp), entry.path, @errorName(err),
+ return comp.lockAndSetMiscFailure(.docs_copy, "unable to open {f}{s}: {t}", .{
+ root.fmt(comp), entry.path, err,
});
};
defer file.close();
- archiver.writeFile(entry.path, file) catch |err| {
- return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive '{f}{s}': {s}", .{
- root.fmt(comp), entry.path, @errorName(err),
+ const stat = try file.stat();
+ var file_reader: fs.File.Reader = .initSize(file, &buffer, stat.size);
+
+ archiver.writeFile(entry.path, &file_reader, stat.mtime) catch |err| {
+ return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{
+ root.fmt(comp), entry.path, err,
});
};
}
@@ -4926,9 +4947,7 @@ fn workerDocsWasm(comp: *Compilation, parent_prog_node: std.Progress.Node) void
workerDocsWasmFallible(comp, prog_node) catch |err| switch (err) {
error.SubCompilationFailed => return, // error reported already
- else => comp.lockAndSetMiscFailure(.docs_wasm, "unable to build autodocs: {s}", .{
- @errorName(err),
- }),
+ else => comp.lockAndSetMiscFailure(.docs_wasm, "unable to build autodocs: {t}", .{err}),
};
}
@@ -6206,19 +6225,20 @@ fn spawnZigRc(
return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) });
};
- var poller = std.io.poll(comp.gpa, enum { stdout }, .{
+ var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
+ .stderr = child.stderr.?,
});
defer poller.deinit();
- const stdout = poller.fifo(.stdout);
+ const stdout = poller.reader(.stdout);
poll: while (true) {
- while (stdout.readableLength() < @sizeOf(std.zig.Server.Message.Header)) if (!try poller.poll()) break :poll;
- var header: std.zig.Server.Message.Header = undefined;
- assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(std.zig.Server.Message.Header));
- while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
- const body = stdout.readableSliceOfLen(header.bytes_len);
+ const MessageHeader = std.zig.Server.Message.Header;
+ while (stdout.buffered().len < @sizeOf(MessageHeader)) if (!try poller.poll()) break :poll;
+ const header = stdout.takeStruct(MessageHeader, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+ const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
// We expect exactly one ErrorBundle, and if any error_bundle header is
@@ -6241,13 +6261,10 @@ fn spawnZigRc(
},
else => {}, // ignore other messages
}
-
- stdout.discard(body.len);
}
// Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace)
- const stderr_reader = child.stderr.?.deprecatedReader();
- const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
+ const stderr = poller.reader(.stderr);
const term = child.wait() catch |err| {
return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {s}", .{ argv[0], @errorName(err) });
@@ -6256,12 +6273,12 @@ fn spawnZigRc(
switch (term) {
.Exited => |code| {
if (code != 0) {
- log.err("zig rc failed with stderr:\n{s}", .{stderr});
+ log.err("zig rc failed with stderr:\n{s}", .{stderr.buffered()});
return comp.failWin32Resource(win32_resource, "zig rc exited with code {d}", .{code});
}
},
else => {
- log.err("zig rc terminated with stderr:\n{s}", .{stderr});
+ log.err("zig rc terminated with stderr:\n{s}", .{stderr.buffered()});
return comp.failWin32Resource(win32_resource, "zig rc terminated unexpectedly", .{});
},
}
diff --git a/src/InternPool.zig b/src/InternPool.zig
index c9036da45b..15d895aed0 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -7556,12 +7556,18 @@ fn extraFuncCoerced(ip: *const InternPool, extra: Local.Extra, extra_index: u32)
fn indexToKeyBigInt(ip: *const InternPool, tid: Zcu.PerThread.Id, limb_index: u32, positive: bool) Key {
const limbs_items = ip.getLocalShared(tid).getLimbs().view().items(.@"0");
const int: Int = @bitCast(limbs_items[limb_index..][0..Int.limbs_items_len].*);
+ const big_int: BigIntConst = .{
+ .limbs = limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len],
+ .positive = positive,
+ };
return .{ .int = .{
.ty = int.ty,
- .storage = .{ .big_int = .{
- .limbs = limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len],
- .positive = positive,
- } },
+ .storage = if (big_int.toInt(u64)) |x|
+ .{ .u64 = x }
+ else |_| if (big_int.toInt(i64)) |x|
+ .{ .i64 = x }
+ else |_|
+ .{ .big_int = big_int },
} };
}
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index a97b60a17c..6ad0030c17 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -1197,12 +1197,18 @@ fn unpackResource(
};
switch (file_type) {
- .tar => return try unpackTarball(f, tmp_directory.handle, resource.reader()),
+ .tar => {
+ var adapter_buffer: [1024]u8 = undefined;
+ var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
+ return unpackTarball(f, tmp_directory.handle, &adapter.new_interface);
+ },
.@"tar.gz" => {
const reader = resource.reader();
var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
var dcp = std.compress.gzip.decompressor(br.reader());
- return try unpackTarball(f, tmp_directory.handle, dcp.reader());
+ var adapter_buffer: [1024]u8 = undefined;
+ var adapter = dcp.reader().adaptToNewApi(&adapter_buffer);
+ return try unpackTarball(f, tmp_directory.handle, &adapter.new_interface);
},
.@"tar.xz" => {
const gpa = f.arena.child_allocator;
@@ -1215,17 +1221,19 @@ fn unpackResource(
));
};
defer dcp.deinit();
- return try unpackTarball(f, tmp_directory.handle, dcp.reader());
+ var adapter_buffer: [1024]u8 = undefined;
+ var adapter = dcp.reader().adaptToNewApi(&adapter_buffer);
+ return try unpackTarball(f, tmp_directory.handle, &adapter.new_interface);
},
.@"tar.zst" => {
- const window_size = std.compress.zstd.DecompressorOptions.default_window_buffer_len;
+ const window_size = std.compress.zstd.default_window_len;
const window_buffer = try f.arena.allocator().create([window_size]u8);
- const reader = resource.reader();
- var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
- var dcp = std.compress.zstd.decompressor(br.reader(), .{
- .window_buffer = window_buffer,
+ var adapter_buffer: [std.crypto.tls.max_ciphertext_record_len]u8 = undefined;
+ var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
+ var decompress: std.compress.zstd.Decompress = .init(&adapter.new_interface, window_buffer, .{
+ .verify_checksum = false,
});
- return try unpackTarball(f, tmp_directory.handle, dcp.reader());
+ return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
},
.git_pack => return unpackGitPack(f, tmp_directory.handle, &resource.git) catch |err| switch (err) {
error.FetchFailed => return error.FetchFailed,
@@ -1239,7 +1247,7 @@ fn unpackResource(
}
}
-fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult {
+fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) RunError!UnpackResult {
const eb = &f.error_bundle;
const arena = f.arena.allocator();
@@ -1250,10 +1258,10 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackRes
.strip_components = 0,
.mode_mode = .ignore,
.exclude_empty_directories = true,
- }) catch |err| return f.fail(f.location_tok, try eb.printString(
- "unable to unpack tarball to temporary directory: {s}",
- .{@errorName(err)},
- ));
+ }) catch |err| return f.fail(
+ f.location_tok,
+ try eb.printString("unable to unpack tarball to temporary directory: {t}", .{err}),
+ );
var res: UnpackResult = .{ .root_dir = diagnostics.root_dir };
if (diagnostics.errors.items.len > 0) {
diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig
index 4d2dae904f..a8446d48a8 100644
--- a/src/Package/Fetch/git.zig
+++ b/src/Package/Fetch/git.zig
@@ -1281,7 +1281,7 @@ pub fn indexPack(allocator: Allocator, format: Oid.Format, pack: std.fs.File, in
}
@memset(fan_out_table[fan_out_index..], count);
- var index_hashed_writer = std.compress.hashedWriter(index_writer, Oid.Hasher.init(format));
+ var index_hashed_writer = hashedWriter(index_writer, Oid.Hasher.init(format));
const writer = index_hashed_writer.writer();
try writer.writeAll(IndexHeader.signature);
try writer.writeInt(u32, IndexHeader.supported_version, .big);
@@ -1331,7 +1331,7 @@ fn indexPackFirstPass(
) !Oid {
var pack_buffered_reader = std.io.bufferedReader(pack.deprecatedReader());
var pack_counting_reader = std.io.countingReader(pack_buffered_reader.reader());
- var pack_hashed_reader = std.compress.hashedReader(pack_counting_reader.reader(), Oid.Hasher.init(format));
+ var pack_hashed_reader = hashedReader(pack_counting_reader.reader(), Oid.Hasher.init(format));
const pack_reader = pack_hashed_reader.reader();
const pack_header = try PackHeader.read(pack_reader);
@@ -1339,13 +1339,13 @@ fn indexPackFirstPass(
var current_entry: u32 = 0;
while (current_entry < pack_header.total_objects) : (current_entry += 1) {
const entry_offset = pack_counting_reader.bytes_read;
- var entry_crc32_reader = std.compress.hashedReader(pack_reader, std.hash.Crc32.init());
+ var entry_crc32_reader = hashedReader(pack_reader, std.hash.Crc32.init());
const entry_header = try EntryHeader.read(format, entry_crc32_reader.reader());
switch (entry_header) {
.commit, .tree, .blob, .tag => |object| {
var entry_decompress_stream = std.compress.zlib.decompressor(entry_crc32_reader.reader());
var entry_counting_reader = std.io.countingReader(entry_decompress_stream.reader());
- var entry_hashed_writer = std.compress.hashedWriter(std.io.null_writer, Oid.Hasher.init(format));
+ var entry_hashed_writer = hashedWriter(std.io.null_writer, Oid.Hasher.init(format));
const entry_writer = entry_hashed_writer.writer();
// The object header is not included in the pack data but is
// part of the object's ID
@@ -1432,7 +1432,7 @@ fn indexPackHashDelta(
const base_data = try resolveDeltaChain(allocator, format, pack, base_object, delta_offsets.items, cache);
var entry_hasher: Oid.Hasher = .init(format);
- var entry_hashed_writer = std.compress.hashedWriter(std.io.null_writer, &entry_hasher);
+ var entry_hashed_writer = hashedWriter(std.io.null_writer, &entry_hasher);
try entry_hashed_writer.writer().print("{s} {}\x00", .{ @tagName(base_object.type), base_data.len });
entry_hasher.update(base_data);
return entry_hasher.finalResult();
@@ -1703,3 +1703,58 @@ pub fn main() !void {
std.debug.print("Diagnostic: {}\n", .{err});
}
}
+
+/// Deprecated
+fn hashedReader(reader: anytype, hasher: anytype) HashedReader(@TypeOf(reader), @TypeOf(hasher)) {
+ return .{ .child_reader = reader, .hasher = hasher };
+}
+
+/// Deprecated
+fn HashedReader(ReaderType: type, HasherType: type) type {
+ return struct {
+ child_reader: ReaderType,
+ hasher: HasherType,
+
+ pub const Error = ReaderType.Error;
+ pub const Reader = std.io.GenericReader(*@This(), Error, read);
+
+ pub fn read(self: *@This(), buf: []u8) Error!usize {
+ const amt = try self.child_reader.read(buf);
+ self.hasher.update(buf[0..amt]);
+ return amt;
+ }
+
+ pub fn reader(self: *@This()) Reader {
+ return .{ .context = self };
+ }
+ };
+}
+
+/// Deprecated
+pub fn HashedWriter(WriterType: type, HasherType: type) type {
+ return struct {
+ child_writer: WriterType,
+ hasher: HasherType,
+
+ pub const Error = WriterType.Error;
+ pub const Writer = std.io.GenericWriter(*@This(), Error, write);
+
+ pub fn write(self: *@This(), buf: []const u8) Error!usize {
+ const amt = try self.child_writer.write(buf);
+ self.hasher.update(buf[0..amt]);
+ return amt;
+ }
+
+ pub fn writer(self: *@This()) Writer {
+ return .{ .context = self };
+ }
+ };
+}
+
+/// Deprecated
+pub fn hashedWriter(
+ writer: anytype,
+ hasher: anytype,
+) HashedWriter(@TypeOf(writer), @TypeOf(hasher)) {
+ return .{ .child_writer = writer, .hasher = hasher };
+}
diff --git a/src/Package/Module.zig b/src/Package/Module.zig
index d829b397ba..1c941f51f4 100644
--- a/src/Package/Module.zig
+++ b/src/Package/Module.zig
@@ -250,7 +250,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
};
const stack_check = b: {
- if (!target_util.supportsStackProbing(target)) {
+ if (!target_util.supportsStackProbing(target, zig_backend)) {
if (options.inherited.stack_check == true)
return error.StackCheckUnsupportedByTarget;
break :b false;
diff --git a/src/Sema.zig b/src/Sema.zig
index d2e3e32214..93740589bc 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -16522,7 +16522,7 @@ fn zirAsm(
break :empty try sema.structInitEmpty(block, clobbers_ty, src, src);
} else try sema.resolveInst(extra.data.clobbers); // Already coerced by AstGen.
const clobbers_val = try sema.resolveConstDefinedValue(block, src, clobbers, .{ .simple = .clobber });
- needed_capacity += (asm_source.len + 3) / 4;
+ needed_capacity += asm_source.len / 4 + 1;
const gpa = sema.gpa;
try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity);
@@ -16562,7 +16562,8 @@ fn zirAsm(
{
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..asm_source.len], asm_source);
- sema.air_extra.items.len += (asm_source.len + 3) / 4;
+ buffer[asm_source.len] = 0;
+ sema.air_extra.items.len += asm_source.len / 4 + 1;
}
return asm_air;
}
@@ -22482,11 +22483,18 @@ fn ptrCastFull(
.slice => {},
.many, .c, .one => break :len null,
}
- // `null` means the operand is a runtime-known slice (so the length is runtime-known).
- const opt_src_len: ?u64 = switch (src_info.flags.size) {
- .one => 1,
- .slice => src_len: {
- const operand_val = try sema.resolveValue(operand) orelse break :src_len null;
+ // A `null` length means the operand is a runtime-known slice (so the length is runtime-known).
+ // `src_elem_type` is different from `src_info.child` if the latter is an array, to ensure we ignore sentinels.
+ const src_elem_ty: Type, const opt_src_len: ?u64 = switch (src_info.flags.size) {
+ .one => src: {
+ const true_child: Type = .fromInterned(src_info.child);
+ break :src switch (true_child.zigTypeTag(zcu)) {
+ .array => .{ true_child.childType(zcu), true_child.arrayLen(zcu) },
+ else => .{ true_child, 1 },
+ };
+ },
+ .slice => src: {
+ const operand_val = try sema.resolveValue(operand) orelse break :src .{ .fromInterned(src_info.child), null };
if (operand_val.isUndef(zcu)) break :len .undef;
const slice_val = switch (operand_ty.zigTypeTag(zcu)) {
.optional => operand_val.optionalValue(zcu) orelse break :len .undef,
@@ -22495,14 +22503,13 @@ fn ptrCastFull(
};
const slice_len_resolved = try sema.resolveLazyValue(.fromInterned(zcu.intern_pool.sliceLen(slice_val.toIntern())));
if (slice_len_resolved.isUndef(zcu)) break :len .undef;
- break :src_len slice_len_resolved.toUnsignedInt(zcu);
+ break :src .{ .fromInterned(src_info.child), slice_len_resolved.toUnsignedInt(zcu) };
},
.many, .c => {
return sema.fail(block, src, "cannot infer length of slice from {s}", .{pointerSizeString(src_info.flags.size)});
},
};
const dest_elem_ty: Type = .fromInterned(dest_info.child);
- const src_elem_ty: Type = .fromInterned(src_info.child);
if (dest_elem_ty.toIntern() == src_elem_ty.toIntern()) {
break :len if (opt_src_len) |l| .{ .constant = l } else .equal_runtime_src_slice;
}
@@ -22518,7 +22525,7 @@ fn ptrCastFull(
const bytes = src_len * src_elem_size;
const dest_len = std.math.divExact(u64, bytes, dest_elem_size) catch switch (src_info.flags.size) {
.slice => return sema.fail(block, src, "slice length '{d}' does not divide exactly into destination elements", .{src_len}),
- .one => return sema.fail(block, src, "type '{f}' does not divide exactly into destination elements", .{src_elem_ty.fmt(pt)}),
+ .one => return sema.fail(block, src, "type '{f}' does not divide exactly into destination elements", .{Type.fromInterned(src_info.child).fmt(pt)}),
else => unreachable,
};
break :len .{ .constant = dest_len };
@@ -24846,7 +24853,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
.@"packed" => {
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
- (if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
+ (if (zcu.typeToStruct(parent_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch
return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{});
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0)
@@ -24873,7 +24880,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
// Logic lifted from type computation above - I'm just assuming it's correct.
// `catch unreachable` since error case handled above.
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
- pt.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
+ zcu.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch unreachable;
const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
diff --git a/src/Type.zig b/src/Type.zig
index a199811c8e..9316bec11e 100644
--- a/src/Type.zig
+++ b/src/Type.zig
@@ -4166,7 +4166,7 @@ pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
pub fn smallestUnsignedBits(max: u64) u16 {
return switch (max) {
0 => 0,
- else => 1 + std.math.log2_int(u64, max),
+ else => @as(u16, 1) + std.math.log2_int(u64, max),
};
}
diff --git a/src/Zcu.zig b/src/Zcu.zig
index d337f0b943..df35777231 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -3891,6 +3891,29 @@ pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructTyp
return s;
}
+/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
+/// into the packed struct InternPool data rather than computing this on the
+/// fly, however it was found to perform worse when measured on real world
+/// projects.
+pub fn structPackedFieldBitOffset(
+ zcu: *Zcu,
+ struct_type: InternPool.LoadedStructType,
+ field_index: u32,
+) u16 {
+ const ip = &zcu.intern_pool;
+ assert(struct_type.layout == .@"packed");
+ assert(struct_type.haveLayout(ip));
+ var bit_sum: u64 = 0;
+ for (0..struct_type.field_types.len) |i| {
+ if (i == field_index) {
+ return @intCast(bit_sum);
+ }
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ bit_sum += field_ty.bitSize(zcu);
+ }
+ unreachable; // index out of bounds
+}
+
pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
if (ty.ip_index == .none) return null;
const ip = &zcu.intern_pool;
@@ -4436,11 +4459,7 @@ pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enu
else => false,
},
.stage2_aarch64 => switch (cc) {
- .aarch64_aapcs,
- .aarch64_aapcs_darwin,
- .aarch64_aapcs_win,
- => |opts| opts.incoming_stack_alignment == null,
- .naked => true,
+ .aarch64_aapcs, .aarch64_aapcs_darwin, .naked => true,
else => false,
},
.stage2_x86 => switch (cc) {
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 26f008e1c8..119b742a89 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -3737,30 +3737,6 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
}
}
-/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
-/// into the packed struct InternPool data rather than computing this on the
-/// fly, however it was found to perform worse when measured on real world
-/// projects.
-pub fn structPackedFieldBitOffset(
- pt: Zcu.PerThread,
- struct_type: InternPool.LoadedStructType,
- field_index: u32,
-) u16 {
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
- assert(struct_type.layout == .@"packed");
- assert(struct_type.haveLayout(ip));
- var bit_sum: u64 = 0;
- for (0..struct_type.field_types.len) |i| {
- if (i == field_index) {
- return @intCast(bit_sum);
- }
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- bit_sum += field_ty.bitSize(zcu);
- }
- unreachable; // index out of bounds
-}
-
pub fn navPtrType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Allocator.Error!Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@@ -4381,8 +4357,11 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
try air.legalize(pt, features);
}
- var liveness: Air.Liveness = try .analyze(zcu, air.*, ip);
- defer liveness.deinit(gpa);
+ var liveness: ?Air.Liveness = if (codegen.wantsLiveness(pt, nav))
+ try .analyze(zcu, air.*, ip)
+ else
+ null;
+ defer if (liveness) |*l| l.deinit(gpa);
if (build_options.enable_debug_extensions and comp.verbose_air) {
const stderr = std.debug.lockStderrWriter(&.{});
@@ -4392,12 +4371,12 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
}
- if (std.debug.runtime_safety) {
+ if (std.debug.runtime_safety) verify_liveness: {
var verify: Air.Liveness.Verify = .{
.gpa = gpa,
.zcu = zcu,
.air = air.*,
- .liveness = liveness,
+ .liveness = liveness orelse break :verify_liveness,
.intern_pool = ip,
};
defer verify.deinit();
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
deleted file mode 100644
index 9c4227a712..0000000000
--- a/src/arch/aarch64/bits.zig
+++ /dev/null
@@ -1,2063 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const testing = std.testing;
-
-/// Disjoint sets of registers. Every register must belong to
-/// exactly one register class.
-pub const RegisterClass = enum {
- general_purpose,
- stack_pointer,
- floating_point,
-};
-
-/// Registers in the AArch64 instruction set
-pub const Register = enum(u8) {
- // zig fmt: off
- // 64-bit general-purpose registers
- x0, x1, x2, x3, x4, x5, x6, x7,
- x8, x9, x10, x11, x12, x13, x14, x15,
- x16, x17, x18, x19, x20, x21, x22, x23,
- x24, x25, x26, x27, x28, x29, x30, xzr,
-
- // 32-bit general-purpose registers
- w0, w1, w2, w3, w4, w5, w6, w7,
- w8, w9, w10, w11, w12, w13, w14, w15,
- w16, w17, w18, w19, w20, w21, w22, w23,
- w24, w25, w26, w27, w28, w29, w30, wzr,
-
- // Stack pointer
- sp, wsp,
-
- // 128-bit floating-point registers
- q0, q1, q2, q3, q4, q5, q6, q7,
- q8, q9, q10, q11, q12, q13, q14, q15,
- q16, q17, q18, q19, q20, q21, q22, q23,
- q24, q25, q26, q27, q28, q29, q30, q31,
-
- // 64-bit floating-point registers
- d0, d1, d2, d3, d4, d5, d6, d7,
- d8, d9, d10, d11, d12, d13, d14, d15,
- d16, d17, d18, d19, d20, d21, d22, d23,
- d24, d25, d26, d27, d28, d29, d30, d31,
-
- // 32-bit floating-point registers
- s0, s1, s2, s3, s4, s5, s6, s7,
- s8, s9, s10, s11, s12, s13, s14, s15,
- s16, s17, s18, s19, s20, s21, s22, s23,
- s24, s25, s26, s27, s28, s29, s30, s31,
-
- // 16-bit floating-point registers
- h0, h1, h2, h3, h4, h5, h6, h7,
- h8, h9, h10, h11, h12, h13, h14, h15,
- h16, h17, h18, h19, h20, h21, h22, h23,
- h24, h25, h26, h27, h28, h29, h30, h31,
-
- // 8-bit floating-point registers
- b0, b1, b2, b3, b4, b5, b6, b7,
- b8, b9, b10, b11, b12, b13, b14, b15,
- b16, b17, b18, b19, b20, b21, b22, b23,
- b24, b25, b26, b27, b28, b29, b30, b31,
- // zig fmt: on
-
- pub fn class(self: Register) RegisterClass {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => .general_purpose,
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => .general_purpose,
-
- @intFromEnum(Register.sp) => .stack_pointer,
- @intFromEnum(Register.wsp) => .stack_pointer,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => .floating_point,
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => .floating_point,
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => .floating_point,
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => .floating_point,
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => .floating_point,
- else => unreachable,
- };
- }
-
- pub fn id(self: Register) u6 {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
-
- @intFromEnum(Register.sp) => 32,
- @intFromEnum(Register.wsp) => 32,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0) + 33)),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0) + 33)),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0) + 33)),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0) + 33)),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0) + 33)),
- else => unreachable,
- };
- }
-
- pub fn enc(self: Register) u5 {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
-
- @intFromEnum(Register.sp) => 31,
- @intFromEnum(Register.wsp) => 31,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0))),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0))),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0))),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0))),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0))),
- else => unreachable,
- };
- }
-
- /// Returns the bit-width of the register.
- pub fn size(self: Register) u8 {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => 64,
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => 32,
-
- @intFromEnum(Register.sp) => 64,
- @intFromEnum(Register.wsp) => 32,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => 128,
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => 64,
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => 32,
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => 16,
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => 8,
- else => unreachable,
- };
- }
-
- /// Convert from a general-purpose register to its 64 bit alias.
- pub fn toX(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0)),
- ),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a general-purpose register to its 32 bit alias.
- pub fn toW(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0)),
- ),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 128 bit alias.
- pub fn toQ(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 64 bit alias.
- pub fn toD(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 32 bit alias.
- pub fn toS(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 16 bit alias.
- pub fn toH(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 8 bit alias.
- pub fn toB(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0)),
- ),
- else => unreachable,
- };
- }
-
- pub fn dwarfNum(self: Register) u5 {
- return self.enc();
- }
-};
-
-test "Register.enc" {
- try testing.expectEqual(@as(u5, 0), Register.x0.enc());
- try testing.expectEqual(@as(u5, 0), Register.w0.enc());
-
- try testing.expectEqual(@as(u5, 31), Register.xzr.enc());
- try testing.expectEqual(@as(u5, 31), Register.wzr.enc());
-
- try testing.expectEqual(@as(u5, 31), Register.sp.enc());
- try testing.expectEqual(@as(u5, 31), Register.sp.enc());
-}
-
-test "Register.size" {
- try testing.expectEqual(@as(u8, 64), Register.x19.size());
- try testing.expectEqual(@as(u8, 32), Register.w3.size());
-}
-
-test "Register.toX/toW" {
- try testing.expectEqual(Register.x0, Register.w0.toX());
- try testing.expectEqual(Register.x0, Register.x0.toX());
-
- try testing.expectEqual(Register.w3, Register.w3.toW());
- try testing.expectEqual(Register.w3, Register.x3.toW());
-}
-
-/// Represents an instruction in the AArch64 instruction set
-pub const Instruction = union(enum) {
- move_wide_immediate: packed struct {
- rd: u5,
- imm16: u16,
- hw: u2,
- fixed: u6 = 0b100101,
- opc: u2,
- sf: u1,
- },
- pc_relative_address: packed struct {
- rd: u5,
- immhi: u19,
- fixed: u5 = 0b10000,
- immlo: u2,
- op: u1,
- },
- load_store_register: packed struct {
- rt: u5,
- rn: u5,
- offset: u12,
- opc: u2,
- op1: u2,
- v: u1,
- fixed: u3 = 0b111,
- size: u2,
- },
- load_store_register_pair: packed struct {
- rt1: u5,
- rn: u5,
- rt2: u5,
- imm7: u7,
- load: u1,
- encoding: u2,
- fixed: u5 = 0b101_0_0,
- opc: u2,
- },
- load_literal: packed struct {
- rt: u5,
- imm19: u19,
- fixed: u6 = 0b011_0_00,
- opc: u2,
- },
- exception_generation: packed struct {
- ll: u2,
- op2: u3,
- imm16: u16,
- opc: u3,
- fixed: u8 = 0b1101_0100,
- },
- unconditional_branch_register: packed struct {
- op4: u5,
- rn: u5,
- op3: u6,
- op2: u5,
- opc: u4,
- fixed: u7 = 0b1101_011,
- },
- unconditional_branch_immediate: packed struct {
- imm26: u26,
- fixed: u5 = 0b00101,
- op: u1,
- },
- no_operation: packed struct {
- fixed: u32 = 0b1101010100_0_00_011_0010_0000_000_11111,
- },
- logical_shifted_register: packed struct {
- rd: u5,
- rn: u5,
- imm6: u6,
- rm: u5,
- n: u1,
- shift: u2,
- fixed: u5 = 0b01010,
- opc: u2,
- sf: u1,
- },
- add_subtract_immediate: packed struct {
- rd: u5,
- rn: u5,
- imm12: u12,
- sh: u1,
- fixed: u6 = 0b100010,
- s: u1,
- op: u1,
- sf: u1,
- },
- logical_immediate: packed struct {
- rd: u5,
- rn: u5,
- imms: u6,
- immr: u6,
- n: u1,
- fixed: u6 = 0b100100,
- opc: u2,
- sf: u1,
- },
- bitfield: packed struct {
- rd: u5,
- rn: u5,
- imms: u6,
- immr: u6,
- n: u1,
- fixed: u6 = 0b100110,
- opc: u2,
- sf: u1,
- },
- add_subtract_shifted_register: packed struct {
- rd: u5,
- rn: u5,
- imm6: u6,
- rm: u5,
- fixed_1: u1 = 0b0,
- shift: u2,
- fixed_2: u5 = 0b01011,
- s: u1,
- op: u1,
- sf: u1,
- },
- add_subtract_extended_register: packed struct {
- rd: u5,
- rn: u5,
- imm3: u3,
- option: u3,
- rm: u5,
- fixed: u8 = 0b01011_00_1,
- s: u1,
- op: u1,
- sf: u1,
- },
- conditional_branch: struct {
- cond: u4,
- o0: u1,
- imm19: u19,
- o1: u1,
- fixed: u7 = 0b0101010,
- },
- compare_and_branch: struct {
- rt: u5,
- imm19: u19,
- op: u1,
- fixed: u6 = 0b011010,
- sf: u1,
- },
- conditional_select: struct {
- rd: u5,
- rn: u5,
- op2: u2,
- cond: u4,
- rm: u5,
- fixed: u8 = 0b11010100,
- s: u1,
- op: u1,
- sf: u1,
- },
- data_processing_3_source: packed struct {
- rd: u5,
- rn: u5,
- ra: u5,
- o0: u1,
- rm: u5,
- op31: u3,
- fixed: u5 = 0b11011,
- op54: u2,
- sf: u1,
- },
- data_processing_2_source: packed struct {
- rd: u5,
- rn: u5,
- opcode: u6,
- rm: u5,
- fixed_1: u8 = 0b11010110,
- s: u1,
- fixed_2: u1 = 0b0,
- sf: u1,
- },
-
- pub const Condition = enum(u4) {
- /// Integer: Equal
- /// Floating point: Equal
- eq,
- /// Integer: Not equal
- /// Floating point: Not equal or unordered
- ne,
- /// Integer: Carry set
- /// Floating point: Greater than, equal, or unordered
- cs,
- /// Integer: Carry clear
- /// Floating point: Less than
- cc,
- /// Integer: Minus, negative
- /// Floating point: Less than
- mi,
- /// Integer: Plus, positive or zero
- /// Floating point: Greater than, equal, or unordered
- pl,
- /// Integer: Overflow
- /// Floating point: Unordered
- vs,
- /// Integer: No overflow
- /// Floating point: Ordered
- vc,
- /// Integer: Unsigned higher
- /// Floating point: Greater than, or unordered
- hi,
- /// Integer: Unsigned lower or same
- /// Floating point: Less than or equal
- ls,
- /// Integer: Signed greater than or equal
- /// Floating point: Greater than or equal
- ge,
- /// Integer: Signed less than
- /// Floating point: Less than, or unordered
- lt,
- /// Integer: Signed greater than
- /// Floating point: Greater than
- gt,
- /// Integer: Signed less than or equal
- /// Floating point: Less than, equal, or unordered
- le,
- /// Integer: Always
- /// Floating point: Always
- al,
- /// Integer: Always
- /// Floating point: Always
- nv,
-
- /// Converts a std.math.CompareOperator into a condition flag,
- /// i.e. returns the condition that is true iff the result of the
- /// comparison is true. Assumes signed comparison
- pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) Condition {
- return switch (op) {
- .gte => .ge,
- .gt => .gt,
- .neq => .ne,
- .lt => .lt,
- .lte => .le,
- .eq => .eq,
- };
- }
-
- /// Converts a std.math.CompareOperator into a condition flag,
- /// i.e. returns the condition that is true iff the result of the
- /// comparison is true. Assumes unsigned comparison
- pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) Condition {
- return switch (op) {
- .gte => .cs,
- .gt => .hi,
- .neq => .ne,
- .lt => .cc,
- .lte => .ls,
- .eq => .eq,
- };
- }
-
- /// Returns the condition which is true iff the given condition is
- /// false (if such a condition exists)
- pub fn negate(cond: Condition) Condition {
- return switch (cond) {
- .eq => .ne,
- .ne => .eq,
- .cs => .cc,
- .cc => .cs,
- .mi => .pl,
- .pl => .mi,
- .vs => .vc,
- .vc => .vs,
- .hi => .ls,
- .ls => .hi,
- .ge => .lt,
- .lt => .ge,
- .gt => .le,
- .le => .gt,
- .al => unreachable,
- .nv => unreachable,
- };
- }
- };
-
- pub fn toU32(self: Instruction) u32 {
- return switch (self) {
- .move_wide_immediate => |v| @as(u32, @bitCast(v)),
- .pc_relative_address => |v| @as(u32, @bitCast(v)),
- .load_store_register => |v| @as(u32, @bitCast(v)),
- .load_store_register_pair => |v| @as(u32, @bitCast(v)),
- .load_literal => |v| @as(u32, @bitCast(v)),
- .exception_generation => |v| @as(u32, @bitCast(v)),
- .unconditional_branch_register => |v| @as(u32, @bitCast(v)),
- .unconditional_branch_immediate => |v| @as(u32, @bitCast(v)),
- .no_operation => |v| @as(u32, @bitCast(v)),
- .logical_shifted_register => |v| @as(u32, @bitCast(v)),
- .add_subtract_immediate => |v| @as(u32, @bitCast(v)),
- .logical_immediate => |v| @as(u32, @bitCast(v)),
- .bitfield => |v| @as(u32, @bitCast(v)),
- .add_subtract_shifted_register => |v| @as(u32, @bitCast(v)),
- .add_subtract_extended_register => |v| @as(u32, @bitCast(v)),
- // TODO once packed structs work, this can be refactored
- .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
- .compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
- .conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
- .data_processing_3_source => |v| @as(u32, @bitCast(v)),
- .data_processing_2_source => |v| @as(u32, @bitCast(v)),
- };
- }
-
- fn moveWideImmediate(
- opc: u2,
- rd: Register,
- imm16: u16,
- shift: u6,
- ) Instruction {
- assert(shift % 16 == 0);
- assert(!(rd.size() == 32 and shift > 16));
- assert(!(rd.size() == 64 and shift > 48));
-
- return Instruction{
- .move_wide_immediate = .{
- .rd = rd.enc(),
- .imm16 = imm16,
- .hw = @as(u2, @intCast(shift / 16)),
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0,
- 64 => 1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction {
- assert(rd.size() == 64);
- const imm21_u = @as(u21, @bitCast(imm21));
- return Instruction{
- .pc_relative_address = .{
- .rd = rd.enc(),
- .immlo = @as(u2, @truncate(imm21_u)),
- .immhi = @as(u19, @truncate(imm21_u >> 2)),
- .op = op,
- },
- };
- }
-
- pub const LoadStoreOffsetImmediate = union(enum) {
- post_index: i9,
- pre_index: i9,
- unsigned: u12,
- };
-
- pub const LoadStoreOffsetRegister = struct {
- rm: u5,
- shift: union(enum) {
- uxtw: u2,
- lsl: u2,
- sxtw: u2,
- sxtx: u2,
- },
- };
-
- /// Represents the offset operand of a load or store instruction.
- /// Data can be loaded from memory with either an immediate offset
- /// or an offset that is stored in some register.
- pub const LoadStoreOffset = union(enum) {
- immediate: LoadStoreOffsetImmediate,
- register: LoadStoreOffsetRegister,
-
- pub const none = LoadStoreOffset{
- .immediate = .{ .unsigned = 0 },
- };
-
- pub fn toU12(self: LoadStoreOffset) u12 {
- return switch (self) {
- .immediate => |imm_type| switch (imm_type) {
- .post_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 1,
- .pre_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 3,
- .unsigned => |v| v,
- },
- .register => |r| switch (r.shift) {
- .uxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 16 + 2050,
- .lsl => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 24 + 2050,
- .sxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 48 + 2050,
- .sxtx => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 56 + 2050,
- },
- };
- }
-
- pub fn imm(offset: u12) LoadStoreOffset {
- return .{
- .immediate = .{ .unsigned = offset },
- };
- }
-
- pub fn imm_post_index(offset: i9) LoadStoreOffset {
- return .{
- .immediate = .{ .post_index = offset },
- };
- }
-
- pub fn imm_pre_index(offset: i9) LoadStoreOffset {
- return .{
- .immediate = .{ .pre_index = offset },
- };
- }
-
- pub fn reg(rm: Register) LoadStoreOffset {
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .lsl = 0,
- },
- },
- };
- }
-
- pub fn reg_uxtw(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 32 and (shift == 0 or shift == 2));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .uxtw = shift,
- },
- },
- };
- }
-
- pub fn reg_lsl(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 64 and (shift == 0 or shift == 3));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .lsl = shift,
- },
- },
- };
- }
-
- pub fn reg_sxtw(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 32 and (shift == 0 or shift == 2));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .sxtw = shift,
- },
- },
- };
- }
-
- pub fn reg_sxtx(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 64 and (shift == 0 or shift == 3));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .sxtx = shift,
- },
- },
- };
- }
- };
-
- /// Which kind of load/store to perform
- const LoadStoreVariant = enum {
- /// 32 bits or 64 bits
- str,
- /// 8 bits, zero-extended
- strb,
- /// 16 bits, zero-extended
- strh,
- /// 32 bits or 64 bits
- ldr,
- /// 8 bits, zero-extended
- ldrb,
- /// 16 bits, zero-extended
- ldrh,
- /// 8 bits, sign extended
- ldrsb,
- /// 16 bits, sign extended
- ldrsh,
- /// 32 bits, sign extended
- ldrsw,
- };
-
- fn loadStoreRegister(
- rt: Register,
- rn: Register,
- offset: LoadStoreOffset,
- variant: LoadStoreVariant,
- ) Instruction {
- assert(rn.size() == 64);
- assert(rn.id() != Register.xzr.id());
-
- const off = offset.toU12();
-
- const op1: u2 = blk: {
- switch (offset) {
- .immediate => |imm| switch (imm) {
- .unsigned => break :blk 0b01,
- else => {},
- },
- else => {},
- }
- break :blk 0b00;
- };
-
- const opc: u2 = blk: {
- switch (variant) {
- .ldr, .ldrh, .ldrb => break :blk 0b01,
- .str, .strh, .strb => break :blk 0b00,
- .ldrsb,
- .ldrsh,
- => switch (rt.size()) {
- 32 => break :blk 0b11,
- 64 => break :blk 0b10,
- else => unreachable, // unexpected register size
- },
- .ldrsw => break :blk 0b10,
- }
- };
-
- const size: u2 = blk: {
- switch (variant) {
- .ldr, .str => switch (rt.size()) {
- 32 => break :blk 0b10,
- 64 => break :blk 0b11,
- else => unreachable, // unexpected register size
- },
- .ldrsw => break :blk 0b10,
- .ldrh, .ldrsh, .strh => break :blk 0b01,
- .ldrb, .ldrsb, .strb => break :blk 0b00,
- }
- };
-
- return Instruction{
- .load_store_register = .{
- .rt = rt.enc(),
- .rn = rn.enc(),
- .offset = off,
- .opc = opc,
- .op1 = op1,
- .v = 0,
- .size = size,
- },
- };
- }
-
- fn loadStoreRegisterPair(
- rt1: Register,
- rt2: Register,
- rn: Register,
- offset: i9,
- encoding: u2,
- load: bool,
- ) Instruction {
- assert(rn.size() == 64);
- assert(rn.id() != Register.xzr.id());
-
- switch (rt1.size()) {
- 32 => {
- assert(-256 <= offset and offset <= 252);
- const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 2))));
- return Instruction{
- .load_store_register_pair = .{
- .rt1 = rt1.enc(),
- .rn = rn.enc(),
- .rt2 = rt2.enc(),
- .imm7 = imm7,
- .load = @intFromBool(load),
- .encoding = encoding,
- .opc = 0b00,
- },
- };
- },
- 64 => {
- assert(-512 <= offset and offset <= 504);
- const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 3))));
- return Instruction{
- .load_store_register_pair = .{
- .rt1 = rt1.enc(),
- .rn = rn.enc(),
- .rt2 = rt2.enc(),
- .imm7 = imm7,
- .load = @intFromBool(load),
- .encoding = encoding,
- .opc = 0b10,
- },
- };
- },
- else => unreachable, // unexpected register size
- }
- }
-
- fn loadLiteral(rt: Register, imm19: u19) Instruction {
- return Instruction{
- .load_literal = .{
- .rt = rt.enc(),
- .imm19 = imm19,
- .opc = switch (rt.size()) {
- 32 => 0b00,
- 64 => 0b01,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn exceptionGeneration(
- opc: u3,
- op2: u3,
- ll: u2,
- imm16: u16,
- ) Instruction {
- return Instruction{
- .exception_generation = .{
- .ll = ll,
- .op2 = op2,
- .imm16 = imm16,
- .opc = opc,
- },
- };
- }
-
- fn unconditionalBranchRegister(
- opc: u4,
- op2: u5,
- op3: u6,
- rn: Register,
- op4: u5,
- ) Instruction {
- assert(rn.size() == 64);
-
- return Instruction{
- .unconditional_branch_register = .{
- .op4 = op4,
- .rn = rn.enc(),
- .op3 = op3,
- .op2 = op2,
- .opc = opc,
- },
- };
- }
-
- fn unconditionalBranchImmediate(
- op: u1,
- offset: i28,
- ) Instruction {
- return Instruction{
- .unconditional_branch_immediate = .{
- .imm26 = @as(u26, @bitCast(@as(i26, @intCast(offset >> 2)))),
- .op = op,
- },
- };
- }
-
- pub const LogicalShiftedRegisterShift = enum(u2) { lsl, lsr, asr, ror };
-
- fn logicalShiftedRegister(
- opc: u2,
- n: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
- if (rd.size() == 32) assert(amount < 32);
-
- return Instruction{
- .logical_shifted_register = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm6 = amount,
- .rm = rm.enc(),
- .n = n,
- .shift = @intFromEnum(shift),
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable,
- },
- },
- };
- }
-
- fn addSubtractImmediate(
- op: u1,
- s: u1,
- rd: Register,
- rn: Register,
- imm12: u12,
- shift: bool,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rn.id() != Register.xzr.id());
-
- return Instruction{
- .add_subtract_immediate = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm12 = imm12,
- .sh = @intFromBool(shift),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn logicalImmediate(
- opc: u2,
- rd: Register,
- rn: Register,
- imms: u6,
- immr: u6,
- n: u1,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(!(rd.size() == 32 and n != 0));
-
- return Instruction{
- .logical_immediate = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imms = imms,
- .immr = immr,
- .n = n,
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn initBitfield(
- opc: u2,
- n: u1,
- rd: Register,
- rn: Register,
- immr: u6,
- imms: u6,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(!(rd.size() == 64 and n != 1));
- assert(!(rd.size() == 32 and (n != 0 or immr >> 5 != 0 or immr >> 5 != 0)));
-
- return Instruction{
- .bitfield = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imms = imms,
- .immr = immr,
- .n = n,
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
-
- fn addSubtractShiftedRegister(
- op: u1,
- s: u1,
- shift: AddSubtractShiftedRegisterShift,
- rd: Register,
- rn: Register,
- rm: Register,
- imm6: u6,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
-
- return Instruction{
- .add_subtract_shifted_register = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm6 = imm6,
- .rm = rm.enc(),
- .shift = @intFromEnum(shift),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- pub const AddSubtractExtendedRegisterOption = enum(u3) {
- uxtb,
- uxth,
- uxtw,
- uxtx, // serves also as lsl
- sxtb,
- sxth,
- sxtw,
- sxtx,
- };
-
- fn addSubtractExtendedRegister(
- op: u1,
- s: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return Instruction{
- .add_subtract_extended_register = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm3 = imm3,
- .option = @intFromEnum(extend),
- .rm = rm.enc(),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn conditionalBranch(
- o0: u1,
- o1: u1,
- cond: Condition,
- offset: i21,
- ) Instruction {
- assert(offset & 0b11 == 0b00);
-
- return Instruction{
- .conditional_branch = .{
- .cond = @intFromEnum(cond),
- .o0 = o0,
- .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
- .o1 = o1,
- },
- };
- }
-
- fn compareAndBranch(
- op: u1,
- rt: Register,
- offset: i21,
- ) Instruction {
- assert(offset & 0b11 == 0b00);
-
- return Instruction{
- .compare_and_branch = .{
- .rt = rt.enc(),
- .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
- .op = op,
- .sf = switch (rt.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn conditionalSelect(
- op2: u2,
- op: u1,
- s: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- cond: Condition,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
-
- return Instruction{
- .conditional_select = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .op2 = op2,
- .cond = @intFromEnum(cond),
- .rm = rm.enc(),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn dataProcessing3Source(
- op54: u2,
- op31: u3,
- o0: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- ra: Register,
- ) Instruction {
- return Instruction{
- .data_processing_3_source = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .ra = ra.enc(),
- .o0 = o0,
- .rm = rm.enc(),
- .op31 = op31,
- .op54 = op54,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn dataProcessing2Source(
- s: u1,
- opcode: u6,
- rd: Register,
- rn: Register,
- rm: Register,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
-
- return Instruction{
- .data_processing_2_source = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .opcode = opcode,
- .rm = rm.enc(),
- .s = s,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- // Helper functions for assembly syntax functions
-
- // Move wide (immediate)
-
- pub fn movn(rd: Register, imm16: u16, shift: u6) Instruction {
- return moveWideImmediate(0b00, rd, imm16, shift);
- }
-
- pub fn movz(rd: Register, imm16: u16, shift: u6) Instruction {
- return moveWideImmediate(0b10, rd, imm16, shift);
- }
-
- pub fn movk(rd: Register, imm16: u16, shift: u6) Instruction {
- return moveWideImmediate(0b11, rd, imm16, shift);
- }
-
- // PC relative address
-
- pub fn adr(rd: Register, imm21: i21) Instruction {
- return pcRelativeAddress(rd, imm21, 0b0);
- }
-
- pub fn adrp(rd: Register, imm21: i21) Instruction {
- return pcRelativeAddress(rd, imm21, 0b1);
- }
-
- // Load or store register
-
- pub fn ldrLiteral(rt: Register, literal: u19) Instruction {
- return loadLiteral(rt, literal);
- }
-
- pub fn ldr(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldr);
- }
-
- pub fn ldrh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrh);
- }
-
- pub fn ldrb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrb);
- }
-
- pub fn ldrsb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrsb);
- }
-
- pub fn ldrsh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrsh);
- }
-
- pub fn ldrsw(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrsw);
- }
-
- pub fn str(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .str);
- }
-
- pub fn strh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .strh);
- }
-
- pub fn strb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .strb);
- }
-
- // Load or store pair of registers
-
- pub const LoadStorePairOffset = struct {
- encoding: enum(u2) {
- post_index = 0b01,
- signed = 0b10,
- pre_index = 0b11,
- },
- offset: i9,
-
- pub fn none() LoadStorePairOffset {
- return .{ .encoding = .signed, .offset = 0 };
- }
-
- pub fn post_index(imm: i9) LoadStorePairOffset {
- return .{ .encoding = .post_index, .offset = imm };
- }
-
- pub fn pre_index(imm: i9) LoadStorePairOffset {
- return .{ .encoding = .pre_index, .offset = imm };
- }
-
- pub fn signed(imm: i9) LoadStorePairOffset {
- return .{ .encoding = .signed, .offset = imm };
- }
- };
-
- pub fn ldp(rt1: Register, rt2: Register, rn: Register, offset: LoadStorePairOffset) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset.offset, @intFromEnum(offset.encoding), true);
- }
-
- pub fn ldnp(rt1: Register, rt2: Register, rn: Register, offset: i9) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset, 0, true);
- }
-
- pub fn stp(rt1: Register, rt2: Register, rn: Register, offset: LoadStorePairOffset) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset.offset, @intFromEnum(offset.encoding), false);
- }
-
- pub fn stnp(rt1: Register, rt2: Register, rn: Register, offset: i9) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset, 0, false);
- }
-
- // Exception generation
-
- pub fn svc(imm16: u16) Instruction {
- return exceptionGeneration(0b000, 0b000, 0b01, imm16);
- }
-
- pub fn hvc(imm16: u16) Instruction {
- return exceptionGeneration(0b000, 0b000, 0b10, imm16);
- }
-
- pub fn smc(imm16: u16) Instruction {
- return exceptionGeneration(0b000, 0b000, 0b11, imm16);
- }
-
- pub fn brk(imm16: u16) Instruction {
- return exceptionGeneration(0b001, 0b000, 0b00, imm16);
- }
-
- pub fn hlt(imm16: u16) Instruction {
- return exceptionGeneration(0b010, 0b000, 0b00, imm16);
- }
-
- // Unconditional branch (register)
-
- pub fn br(rn: Register) Instruction {
- return unconditionalBranchRegister(0b0000, 0b11111, 0b000000, rn, 0b00000);
- }
-
- pub fn blr(rn: Register) Instruction {
- return unconditionalBranchRegister(0b0001, 0b11111, 0b000000, rn, 0b00000);
- }
-
- pub fn ret(rn: ?Register) Instruction {
- return unconditionalBranchRegister(0b0010, 0b11111, 0b000000, rn orelse .x30, 0b00000);
- }
-
- // Unconditional branch (immediate)
-
- pub fn b(offset: i28) Instruction {
- return unconditionalBranchImmediate(0, offset);
- }
-
- pub fn bl(offset: i28) Instruction {
- return unconditionalBranchImmediate(1, offset);
- }
-
- // Nop
-
- pub fn nop() Instruction {
- return Instruction{ .no_operation = .{} };
- }
-
- // Logical (shifted register)
-
- pub fn andShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn bicShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
- }
-
- pub fn orrShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn ornShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
- }
-
- pub fn eorShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn eonShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
- }
-
- pub fn andsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn bicsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b11, 0b1, rd, rn, rm, shift, amount);
- }
-
- // Add/subtract (immediate)
-
- pub fn add(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b0, 0b0, rd, rn, imm, shift);
- }
-
- pub fn adds(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b0, 0b1, rd, rn, imm, shift);
- }
-
- pub fn sub(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b1, 0b0, rd, rn, imm, shift);
- }
-
- pub fn subs(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
- }
-
- // Logical (immediate)
-
- pub fn andImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b00, rd, rn, imms, immr, n);
- }
-
- pub fn orrImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b01, rd, rn, imms, immr, n);
- }
-
- pub fn eorImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b10, rd, rn, imms, immr, n);
- }
-
- pub fn andsImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b11, rd, rn, imms, immr, n);
- }
-
- // Bitfield
-
- pub fn sbfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
- const n: u1 = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- };
- return initBitfield(0b00, n, rd, rn, immr, imms);
- }
-
- pub fn bfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
- const n: u1 = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- };
- return initBitfield(0b01, n, rd, rn, immr, imms);
- }
-
- pub fn ubfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
- const n: u1 = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- };
- return initBitfield(0b10, n, rd, rn, immr, imms);
- }
-
- pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const imms = @as(u6, @intCast(rd.size() - 1));
- return sbfm(rd, rn, shift, imms);
- }
-
- pub fn sbfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
- return sbfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
- }
-
- pub fn sxtb(rd: Register, rn: Register) Instruction {
- return sbfm(rd, rn, 0, 7);
- }
-
- pub fn sxth(rd: Register, rn: Register) Instruction {
- return sbfm(rd, rn, 0, 15);
- }
-
- pub fn sxtw(rd: Register, rn: Register) Instruction {
- assert(rd.size() == 64);
- return sbfm(rd, rn, 0, 31);
- }
-
- pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const size = @as(u6, @intCast(rd.size() - 1));
- return ubfm(rd, rn, size - shift + 1, size - shift);
- }
-
- pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const imms = @as(u6, @intCast(rd.size() - 1));
- return ubfm(rd, rn, shift, imms);
- }
-
- pub fn ubfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
- return ubfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
- }
-
- pub fn uxtb(rd: Register, rn: Register) Instruction {
- return ubfm(rd, rn, 0, 7);
- }
-
- pub fn uxth(rd: Register, rn: Register) Instruction {
- return ubfm(rd, rn, 0, 15);
- }
-
- // Add/subtract (shifted register)
-
- pub fn addShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b0, 0b0, shift, rd, rn, rm, imm6);
- }
-
- pub fn addsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b0, 0b1, shift, rd, rn, rm, imm6);
- }
-
- pub fn subShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b1, 0b0, shift, rd, rn, rm, imm6);
- }
-
- pub fn subsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b1, 0b1, shift, rd, rn, rm, imm6);
- }
-
- // Add/subtract (extended register)
-
- pub fn addExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b0, 0b0, rd, rn, rm, extend, imm3);
- }
-
- pub fn addsExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b0, 0b1, rd, rn, rm, extend, imm3);
- }
-
- pub fn subExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b1, 0b0, rd, rn, rm, extend, imm3);
- }
-
- pub fn subsExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b1, 0b1, rd, rn, rm, extend, imm3);
- }
-
- // Conditional branch
-
- pub fn bCond(cond: Condition, offset: i21) Instruction {
- return conditionalBranch(0b0, 0b0, cond, offset);
- }
-
- // Compare and branch
-
- pub fn cbz(rt: Register, offset: i21) Instruction {
- return compareAndBranch(0b0, rt, offset);
- }
-
- pub fn cbnz(rt: Register, offset: i21) Instruction {
- return compareAndBranch(0b1, rt, offset);
- }
-
- // Conditional select
-
- pub fn csel(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b00, 0b0, 0b0, rd, rn, rm, cond);
- }
-
- pub fn csinc(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b01, 0b0, 0b0, rd, rn, rm, cond);
- }
-
- pub fn csinv(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b00, 0b1, 0b0, rd, rn, rm, cond);
- }
-
- pub fn csneg(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b01, 0b1, 0b0, rd, rn, rm, cond);
- }
-
- // Data processing (3 source)
-
- pub fn madd(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- return dataProcessing3Source(0b00, 0b000, 0b0, rd, rn, rm, ra);
- }
-
- pub fn smaddl(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- assert(rd.size() == 64 and rn.size() == 32 and rm.size() == 32 and ra.size() == 64);
- return dataProcessing3Source(0b00, 0b001, 0b0, rd, rn, rm, ra);
- }
-
- pub fn umaddl(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- assert(rd.size() == 64 and rn.size() == 32 and rm.size() == 32 and ra.size() == 64);
- return dataProcessing3Source(0b00, 0b101, 0b0, rd, rn, rm, ra);
- }
-
- pub fn msub(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- return dataProcessing3Source(0b00, 0b000, 0b1, rd, rn, rm, ra);
- }
-
- pub fn mul(rd: Register, rn: Register, rm: Register) Instruction {
- return madd(rd, rn, rm, .xzr);
- }
-
- pub fn smull(rd: Register, rn: Register, rm: Register) Instruction {
- return smaddl(rd, rn, rm, .xzr);
- }
-
- pub fn smulh(rd: Register, rn: Register, rm: Register) Instruction {
- assert(rd.size() == 64);
- return dataProcessing3Source(0b00, 0b010, 0b0, rd, rn, rm, .xzr);
- }
-
- pub fn umull(rd: Register, rn: Register, rm: Register) Instruction {
- return umaddl(rd, rn, rm, .xzr);
- }
-
- pub fn umulh(rd: Register, rn: Register, rm: Register) Instruction {
- assert(rd.size() == 64);
- return dataProcessing3Source(0b00, 0b110, 0b0, rd, rn, rm, .xzr);
- }
-
- pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
- return msub(rd, rn, rm, .xzr);
- }
-
- // Data processing (2 source)
-
- pub fn udiv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b000010, rd, rn, rm);
- }
-
- pub fn sdiv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b000011, rd, rn, rm);
- }
-
- pub fn lslv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b001000, rd, rn, rm);
- }
-
- pub fn lsrv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b001001, rd, rn, rm);
- }
-
- pub fn asrv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b001010, rd, rn, rm);
- }
-
- pub const asrRegister = asrv;
- pub const lslRegister = lslv;
- pub const lsrRegister = lsrv;
-};
-
-test {
- testing.refAllDecls(@This());
-}
-
-test "serialize instructions" {
- const Testcase = struct {
- inst: Instruction,
- expected: u32,
- };
-
- const testcases = [_]Testcase{
- .{ // orr x0, xzr, x1
- .inst = Instruction.orrShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
- .expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
- },
- .{ // orn x0, xzr, x1
- .inst = Instruction.ornShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
- .expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
- },
- .{ // movz x1, #4
- .inst = Instruction.movz(.x1, 4, 0),
- .expected = 0b1_10_100101_00_0000000000000100_00001,
- },
- .{ // movz x1, #4, lsl 16
- .inst = Instruction.movz(.x1, 4, 16),
- .expected = 0b1_10_100101_01_0000000000000100_00001,
- },
- .{ // movz x1, #4, lsl 32
- .inst = Instruction.movz(.x1, 4, 32),
- .expected = 0b1_10_100101_10_0000000000000100_00001,
- },
- .{ // movz x1, #4, lsl 48
- .inst = Instruction.movz(.x1, 4, 48),
- .expected = 0b1_10_100101_11_0000000000000100_00001,
- },
- .{ // movz w1, #4
- .inst = Instruction.movz(.w1, 4, 0),
- .expected = 0b0_10_100101_00_0000000000000100_00001,
- },
- .{ // movz w1, #4, lsl 16
- .inst = Instruction.movz(.w1, 4, 16),
- .expected = 0b0_10_100101_01_0000000000000100_00001,
- },
- .{ // svc #0
- .inst = Instruction.svc(0),
- .expected = 0b1101_0100_000_0000000000000000_00001,
- },
- .{ // svc #0x80 ; typical on Darwin
- .inst = Instruction.svc(0x80),
- .expected = 0b1101_0100_000_0000000010000000_00001,
- },
- .{ // ret
- .inst = Instruction.ret(null),
- .expected = 0b1101_011_00_10_11111_0000_00_11110_00000,
- },
- .{ // bl #0x10
- .inst = Instruction.bl(0x10),
- .expected = 0b1_00101_00_0000_0000_0000_0000_0000_0100,
- },
- .{ // ldr x2, [x1]
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.none),
- .expected = 0b11_111_0_01_01_000000000000_00001_00010,
- },
- .{ // ldr x2, [x1, #1]!
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.imm_pre_index(1)),
- .expected = 0b11_111_0_00_01_0_000000001_11_00001_00010,
- },
- .{ // ldr x2, [x1], #-1
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.imm_post_index(-1)),
- .expected = 0b11_111_0_00_01_0_111111111_01_00001_00010,
- },
- .{ // ldr x2, [x1], (x3)
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.reg(.x3)),
- .expected = 0b11_111_0_00_01_1_00011_011_0_10_00001_00010,
- },
- .{ // ldr x2, label
- .inst = Instruction.ldrLiteral(.x2, 0x1),
- .expected = 0b01_011_0_00_0000000000000000001_00010,
- },
- .{ // ldrh x7, [x4], #0xaa
- .inst = Instruction.ldrh(.x7, .x4, Instruction.LoadStoreOffset.imm_post_index(0xaa)),
- .expected = 0b01_111_0_00_01_0_010101010_01_00100_00111,
- },
- .{ // ldrb x9, [x15, #0xff]!
- .inst = Instruction.ldrb(.x9, .x15, Instruction.LoadStoreOffset.imm_pre_index(0xff)),
- .expected = 0b00_111_0_00_01_0_011111111_11_01111_01001,
- },
- .{ // str x2, [x1]
- .inst = Instruction.str(.x2, .x1, Instruction.LoadStoreOffset.none),
- .expected = 0b11_111_0_01_00_000000000000_00001_00010,
- },
- .{ // str x2, [x1], (x3)
- .inst = Instruction.str(.x2, .x1, Instruction.LoadStoreOffset.reg(.x3)),
- .expected = 0b11_111_0_00_00_1_00011_011_0_10_00001_00010,
- },
- .{ // strh w0, [x1]
- .inst = Instruction.strh(.w0, .x1, Instruction.LoadStoreOffset.none),
- .expected = 0b01_111_0_01_00_000000000000_00001_00000,
- },
- .{ // strb w8, [x9]
- .inst = Instruction.strb(.w8, .x9, Instruction.LoadStoreOffset.none),
- .expected = 0b00_111_0_01_00_000000000000_01001_01000,
- },
- .{ // adr x2, #0x8
- .inst = Instruction.adr(.x2, 0x8),
- .expected = 0b0_00_10000_0000000000000000010_00010,
- },
- .{ // adr x2, -#0x8
- .inst = Instruction.adr(.x2, -0x8),
- .expected = 0b0_00_10000_1111111111111111110_00010,
- },
- .{ // adrp x2, #0x8
- .inst = Instruction.adrp(.x2, 0x8),
- .expected = 0b1_00_10000_0000000000000000010_00010,
- },
- .{ // adrp x2, -#0x8
- .inst = Instruction.adrp(.x2, -0x8),
- .expected = 0b1_00_10000_1111111111111111110_00010,
- },
- .{ // stp x1, x2, [sp, #8]
- .inst = Instruction.stp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.signed(8)),
- .expected = 0b10_101_0_010_0_0000001_00010_11111_00001,
- },
- .{ // ldp x1, x2, [sp, #8]
- .inst = Instruction.ldp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.signed(8)),
- .expected = 0b10_101_0_010_1_0000001_00010_11111_00001,
- },
- .{ // stp x1, x2, [sp, #-16]!
- .inst = Instruction.stp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.pre_index(-16)),
- .expected = 0b10_101_0_011_0_1111110_00010_11111_00001,
- },
- .{ // ldp x1, x2, [sp], #16
- .inst = Instruction.ldp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.post_index(16)),
- .expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
- },
- .{ // and x0, x4, x2
- .inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0),
- .expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
- },
- .{ // and x0, x4, x2, lsl #0x8
- .inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0x8),
- .expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
- },
- .{ // add x0, x10, #10
- .inst = Instruction.add(.x0, .x10, 10, false),
- .expected = 0b1_0_0_100010_0_0000_0000_1010_01010_00000,
- },
- .{ // subs x0, x5, #11, lsl #12
- .inst = Instruction.subs(.x0, .x5, 11, true),
- .expected = 0b1_1_1_100010_1_0000_0000_1011_00101_00000,
- },
- .{ // b.hi #-4
- .inst = Instruction.bCond(.hi, -4),
- .expected = 0b0101010_0_1111111111111111111_0_1000,
- },
- .{ // cbz x10, #40
- .inst = Instruction.cbz(.x10, 40),
- .expected = 0b1_011010_0_0000000000000001010_01010,
- },
- .{ // add x0, x1, x2, lsl #5
- .inst = Instruction.addShiftedRegister(.x0, .x1, .x2, .lsl, 5),
- .expected = 0b1_0_0_01011_00_0_00010_000101_00001_00000,
- },
- .{ // csinc x1, x2, x4, eq
- .inst = Instruction.csinc(.x1, .x2, .x4, .eq),
- .expected = 0b1_0_0_11010100_00100_0000_0_1_00010_00001,
- },
- .{ // mul x1, x4, x9
- .inst = Instruction.mul(.x1, .x4, .x9),
- .expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
- },
- .{ // eor x3, x5, #1
- .inst = Instruction.eorImmediate(.x3, .x5, 0b000000, 0b000000, 0b1),
- .expected = 0b1_10_100100_1_000000_000000_00101_00011,
- },
- .{ // lslv x6, x9, x10
- .inst = Instruction.lslv(.x6, .x9, .x10),
- .expected = 0b1_0_0_11010110_01010_0010_00_01001_00110,
- },
- .{ // lsl x4, x2, #42
- .inst = Instruction.lslImmediate(.x4, .x2, 42),
- .expected = 0b1_10_100110_1_010110_010101_00010_00100,
- },
- .{ // lsl x4, x2, #63
- .inst = Instruction.lslImmediate(.x4, .x2, 63),
- .expected = 0b1_10_100110_1_000001_000000_00010_00100,
- },
- .{ // lsr x4, x2, #42
- .inst = Instruction.lsrImmediate(.x4, .x2, 42),
- .expected = 0b1_10_100110_1_101010_111111_00010_00100,
- },
- .{ // lsr x4, x2, #63
- .inst = Instruction.lsrImmediate(.x4, .x2, 63),
- .expected = 0b1_10_100110_1_111111_111111_00010_00100,
- },
- .{ // umull x0, w0, w1
- .inst = Instruction.umull(.x0, .w0, .w1),
- .expected = 0b1_00_11011_1_01_00001_0_11111_00000_00000,
- },
- .{ // smull x0, w0, w1
- .inst = Instruction.smull(.x0, .w0, .w1),
- .expected = 0b1_00_11011_0_01_00001_0_11111_00000_00000,
- },
- .{ // tst x0, #0xffffffff00000000
- .inst = Instruction.andsImmediate(.xzr, .x0, 0b011111, 0b100000, 0b1),
- .expected = 0b1_11_100100_1_100000_011111_00000_11111,
- },
- .{ // umulh x0, x1, x2
- .inst = Instruction.umulh(.x0, .x1, .x2),
- .expected = 0b1_00_11011_1_10_00010_0_11111_00001_00000,
- },
- .{ // smulh x0, x1, x2
- .inst = Instruction.smulh(.x0, .x1, .x2),
- .expected = 0b1_00_11011_0_10_00010_0_11111_00001_00000,
- },
- .{ // adds x0, x1, x2, sxtx
- .inst = Instruction.addsExtendedRegister(.x0, .x1, .x2, .sxtx, 0),
- .expected = 0b1_0_1_01011_00_1_00010_111_000_00001_00000,
- },
- };
-
- for (testcases) |case| {
- const actual = case.inst.toU32();
- try testing.expectEqual(case.expected, actual);
- }
-}
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 0753cc5d16..ffff65d4d1 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -744,7 +744,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -767,7 +767,7 @@ pub fn generate(
.pt = pt,
.mod = mod,
.bin_file = bin_file,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.target = &mod.resolved_target.result,
.owner = .{ .nav_index = func.owner_nav },
.args = undefined, // populated after `resolveCallingConventionValues`
@@ -4584,7 +4584,7 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const field_offset: i32 = switch (container_ty.containerLayout(zcu)) {
.auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, zcu)),
.@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(zcu).packed_offset.bit_offset) +
- (if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
+ (if (zcu.typeToStruct(container_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, index) else 0) -
ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
};
@@ -4615,7 +4615,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
const field_off: u32 = switch (struct_ty.containerLayout(zcu)) {
.auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, zcu) * 8),
.@"packed" => if (zcu.typeToStruct(struct_ty)) |struct_type|
- pt.structPackedFieldBitOffset(struct_type, index)
+ zcu.structPackedFieldBitOffset(struct_type, index)
else
0,
};
@@ -8059,7 +8059,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
+ const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try func.resolveInst(elem);
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 31a7f39d69..6ab5dea4ec 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -267,7 +267,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -288,7 +288,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.target = target,
.bin_file = lf,
.func_index = func_index,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 50d104a7bc..e56fb1df96 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1173,7 +1173,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) Error!Mir {
_ = src_loc;
_ = bin_file;
@@ -1194,7 +1194,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.owner_nav = cg.owner_nav,
.target = target,
.ptr_size = switch (target.cpu.arch) {
@@ -1886,8 +1886,10 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.call_never_tail => cg.airCall(inst, .never_tail),
.call_never_inline => cg.airCall(inst, .never_inline),
- .is_err => cg.airIsErr(inst, .i32_ne),
- .is_non_err => cg.airIsErr(inst, .i32_eq),
+ .is_err => cg.airIsErr(inst, .i32_ne, .value),
+ .is_non_err => cg.airIsErr(inst, .i32_eq, .value),
+ .is_err_ptr => cg.airIsErr(inst, .i32_ne, .ptr),
+ .is_non_err_ptr => cg.airIsErr(inst, .i32_eq, .ptr),
.is_null => cg.airIsNull(inst, .i32_eq, .value),
.is_non_null => cg.airIsNull(inst, .i32_ne, .value),
@@ -1970,8 +1972,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.runtime_nav_ptr => cg.airRuntimeNavPtr(inst),
.assembly,
- .is_err_ptr,
- .is_non_err_ptr,
.err_return_trace,
.set_err_return_trace,
@@ -3776,7 +3776,7 @@ fn structFieldPtr(
break :offset @as(u32, 0);
}
const struct_type = zcu.typeToStruct(struct_ty).?;
- break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
+ break :offset @divExact(zcu.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
},
.@"union" => 0,
else => unreachable,
@@ -3812,7 +3812,7 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
.@"struct" => result: {
const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
- const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
+ const offset = zcu.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
const host_bits = backing_ty.intInfo(zcu).bits;
@@ -4105,7 +4105,7 @@ fn airSwitchDispatch(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return cg.finishAir(inst, .none, &.{br.operand});
}
-fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerError!void {
+fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
const zcu = cg.pt.zcu;
const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try cg.resolveInst(un_op);
@@ -4122,7 +4122,7 @@ fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerEr
}
try cg.emitWValue(operand);
- if (pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (op_kind == .ptr or pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try cg.addMemArg(.i32_load16_u, .{
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, zcu))),
.alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
@@ -5696,7 +5696,7 @@ fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.auto, .@"extern" => parent_ty.structFieldOffset(field_index, zcu),
.@"packed" => offset: {
const parent_ptr_offset = parent_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset;
- const field_offset = if (zcu.typeToStruct(parent_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0;
+ const field_offset = if (zcu.typeToStruct(parent_ty)) |loaded_struct| zcu.structPackedFieldBitOffset(loaded_struct, field_index) else 0;
const field_ptr_offset = field_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset;
break :offset @divExact(parent_ptr_offset + field_offset - field_ptr_offset, 8);
},
@@ -6462,9 +6462,6 @@ fn lowerTry(
operand_is_ptr: bool,
) InnerError!WValue {
const zcu = cg.pt.zcu;
- if (operand_is_ptr) {
- return cg.fail("TODO: lowerTry for pointers", .{});
- }
const pl_ty = err_union_ty.errorUnionPayload(zcu);
const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(zcu);
@@ -6475,7 +6472,7 @@ fn lowerTry(
// check if the error tag is set for the error union.
try cg.emitWValue(err_union);
- if (pl_has_bits) {
+ if (pl_has_bits or operand_is_ptr) {
const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
try cg.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
@@ -6497,12 +6494,12 @@ fn lowerTry(
}
// if we reach here it means error was not set, and we want the payload
- if (!pl_has_bits) {
+ if (!pl_has_bits and !operand_is_ptr) {
return .none;
}
const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
- if (isByRef(pl_ty, zcu, cg.target)) {
+ if (operand_is_ptr or isByRef(pl_ty, zcu, cg.target)) {
return buildPointerOffset(cg, err_union, pl_offset, .new);
}
const payload = try cg.load(err_union, pl_ty, pl_offset);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 6341f7e3d2..ad2d1b580f 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -878,7 +878,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) codegen.CodeGenError!Mir {
_ = bin_file;
const zcu = pt.zcu;
@@ -894,7 +894,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.target = &mod.resolved_target.result,
.mod = mod,
.owner = .{ .nav_index = func.owner_nav },
@@ -1103,11 +1103,7 @@ const FormatAirData = struct {
inst: Air.Inst.Index,
};
fn formatAir(data: FormatAirData, w: *std.io.Writer) Writer.Error!void {
- // not acceptable implementation because it ignores `w`:
- //data.self.air.dumpInst(data.inst, data.self.pt, data.self.liveness);
- _ = data;
- _ = w;
- @panic("TODO: unimplemented");
+ data.self.air.writeInst(w, data.inst, data.self.pt, data.self.liveness);
}
fn fmtAir(self: *CodeGen, inst: Air.Inst.Index) std.fmt.Formatter(FormatAirData, formatAir) {
return .{ .data = .{ .self = self, .inst = inst } };
@@ -100674,11 +100670,12 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data;
var ops = try cg.tempsFromOperands(inst, .{struct_field.struct_operand});
- try ops[0].toOffset(cg.fieldOffset(
+ try ops[0].toOffset(@intCast(codegen.fieldOffset(
cg.typeOf(struct_field.struct_operand),
ty_pl.ty.toType(),
struct_field.field_index,
- ), cg);
+ zcu,
+ )), cg);
try ops[0].finish(inst, &.{struct_field.struct_operand}, &ops, cg);
},
.struct_field_ptr_index_0,
@@ -100688,7 +100685,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
=> |air_tag| {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
- try ops[0].toOffset(cg.fieldOffset(
+ try ops[0].toOffset(@intCast(codegen.fieldOffset(
cg.typeOf(ty_op.operand),
ty_op.ty.toType(),
switch (air_tag) {
@@ -100698,7 +100695,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.struct_field_ptr_index_2 => 2,
.struct_field_ptr_index_3 => 3,
},
- ), cg);
+ zcu,
+ )), cg);
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},
.struct_field_val => {
@@ -168108,11 +168106,12 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const field_parent_ptr = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
var ops = try cg.tempsFromOperands(inst, .{field_parent_ptr.field_ptr});
- try ops[0].toOffset(-cg.fieldOffset(
+ try ops[0].toOffset(-@as(i32, @intCast(codegen.fieldOffset(
ty_pl.ty.toType(),
cg.typeOf(field_parent_ptr.field_ptr),
field_parent_ptr.field_index,
- ), cg);
+ zcu,
+ ))), cg);
try ops[0].finish(inst, &.{field_parent_ptr.field_ptr}, &ops, cg);
},
.wasm_memory_size, .wasm_memory_grow => unreachable,
@@ -168138,7 +168137,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.unused,
.unused,
},
- .dst_temps = .{ .{ .cc = .b }, .unused },
+ .dst_temps = .{ .{ .cc = .be }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp1p, .lea(.tmp0), ._, ._ },
@@ -168162,7 +168161,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.unused,
.unused,
},
- .dst_temps = .{ .{ .cc = .b }, .unused },
+ .dst_temps = .{ .{ .cc = .be }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp1p, .lea(.tmp0), ._, ._ },
@@ -168186,7 +168185,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.unused,
.unused,
},
- .dst_temps = .{ .{ .cc = .b }, .unused },
+ .dst_temps = .{ .{ .cc = .be }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp1p, .lea(.tmp0), ._, ._ },
@@ -174809,18 +174808,6 @@ fn airStore(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void {
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn fieldOffset(self: *CodeGen, ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32) i32 {
- const pt = self.pt;
- const zcu = pt.zcu;
- const agg_ty = ptr_agg_ty.childType(zcu);
- return switch (agg_ty.containerLayout(zcu)) {
- .auto, .@"extern" => @intCast(agg_ty.structFieldOffset(field_index, zcu)),
- .@"packed" => @divExact(@as(i32, ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset) +
- (if (zcu.typeToStruct(agg_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0) -
- ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
- };
-}
-
fn genUnOp(self: *CodeGen, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
const pt = self.pt;
const zcu = pt.zcu;
@@ -179309,10 +179296,13 @@ fn lowerSwitchBr(
} else undefined;
const table_start: u31 = @intCast(cg.mir_table.items.len);
{
- const condition_index_reg = if (condition_index.isRegister())
- condition_index.getReg().?
- else
- try cg.copyToTmpRegister(.usize, condition_index);
+ const condition_index_reg = condition_index_reg: {
+ if (condition_index.isRegister()) {
+ const condition_index_reg = condition_index.getReg().?;
+ if (condition_index_reg.isClass(.general_purpose)) break :condition_index_reg condition_index_reg;
+ }
+ break :condition_index_reg try cg.copyToTmpRegister(.usize, condition_index);
+ };
const condition_index_lock = cg.register_manager.lockReg(condition_index_reg);
defer if (condition_index_lock) |lock| cg.register_manager.unlockReg(lock);
try cg.truncateRegister(condition_ty, condition_index_reg);
@@ -184575,7 +184565,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
}
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = pt.structPackedFieldBitOffset(loaded_struct, elem_i);
+ const elem_off = zcu.structPackedFieldBitOffset(loaded_struct, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
@@ -185625,21 +185615,19 @@ fn resolveCallingConventionValues(
fn fail(cg: *CodeGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = cg.pt.zcu;
- switch (cg.owner) {
- .nav_index => |i| return zcu.codegenFail(i, format, args),
- .lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
- }
- return error.CodegenFail;
+ return switch (cg.owner) {
+ .nav_index => |i| zcu.codegenFail(i, format, args),
+ .lazy_sym => |s| zcu.codegenFailType(s.ty, format, args),
+ };
}
fn failMsg(cg: *CodeGen, msg: *Zcu.ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = cg.pt.zcu;
- switch (cg.owner) {
- .nav_index => |i| return zcu.codegenFailMsg(i, msg),
- .lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
- }
- return error.CodegenFail;
+ return switch (cg.owner) {
+ .nav_index => |i| zcu.codegenFailMsg(i, msg),
+ .lazy_sym => |s| zcu.codegenFailTypeMsg(s.ty, msg),
+ };
}
fn parseRegName(name: []const u8) ?Register {
@@ -191932,18 +191920,15 @@ const Select = struct {
error.InvalidInstruction => {
const fixes = @tagName(mir_tag[0]);
const fixes_blank = std.mem.indexOfScalar(u8, fixes, '_').?;
- return s.cg.fail(
- "invalid instruction: '{s}{s}{s} {s} {s} {s} {s}'",
- .{
- fixes[0..fixes_blank],
- @tagName(mir_tag[1]),
- fixes[fixes_blank + 1 ..],
- @tagName(mir_ops[0]),
- @tagName(mir_ops[1]),
- @tagName(mir_ops[2]),
- @tagName(mir_ops[3]),
- },
- );
+ return s.cg.fail("invalid instruction: '{s}{s}{s} {s} {s} {s} {s}'", .{
+ fixes[0..fixes_blank],
+ @tagName(mir_tag[1]),
+ fixes[fixes_blank + 1 ..],
+ @tagName(mir_ops[0]),
+ @tagName(mir_ops[1]),
+ @tagName(mir_ops[2]),
+ @tagName(mir_ops[3]),
+ });
},
else => |e| return e,
};
@@ -194435,6 +194420,18 @@ fn select(
while (true) for (pattern.src[0..src_temps.len], src_temps) |src_pattern, *src_temp| {
if (try src_pattern.convert(src_temp, cg)) break;
} else break;
+ var src_locks: [s_src_temps.len][2]?RegisterLock = @splat(@splat(null));
+ for (src_locks[0..src_temps.len], src_temps) |*locks, src_temp| {
+ const regs: [2]Register = switch (src_temp.tracking(cg).short) {
+ else => continue,
+ .register => |reg| .{ reg, .none },
+ .register_pair => |regs| regs,
+ };
+ for (regs, locks) |reg, *lock| {
+ if (reg == .none) continue;
+ lock.* = cg.register_manager.lockRegIndex(RegisterManager.indexOfRegIntoTracked(reg) orelse continue);
+ }
+ }
@memcpy(s_src_temps[0..src_temps.len], src_temps);
std.mem.swap(Temp, &s_src_temps[pattern.commute[0]], &s_src_temps[pattern.commute[1]]);
@@ -194453,6 +194450,7 @@ fn select(
}
assert(s.top == 0);
+ for (src_locks) |locks| for (locks) |lock| if (lock) |reg| cg.register_manager.unlockReg(reg);
for (tmp_locks) |locks| for (locks) |lock| if (lock) |reg| cg.register_manager.unlockReg(reg);
for (dst_locks) |locks| for (locks) |lock| if (lock) |reg| cg.register_manager.unlockReg(reg);
caller_preserved: {
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index da15dc6bfb..49c67620d5 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -168,11 +168,12 @@ pub fn emitMir(emit: *Emit) Error!void {
else if (emit.bin_file.cast(.macho)) |macho_file|
macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, emit.pt, lazy_sym) catch |err|
return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
- else if (emit.bin_file.cast(.coff)) |coff_file| sym_index: {
- const atom = coff_file.getOrCreateAtomForLazySymbol(emit.pt, lazy_sym) catch |err|
- return emit.fail("{s} creating lazy symbol", .{@errorName(err)});
- break :sym_index coff_file.getAtom(atom).getSymbolIndex().?;
- } else if (emit.bin_file.cast(.plan9)) |p9_file|
+ else if (emit.bin_file.cast(.coff)) |coff_file|
+ if (coff_file.getOrCreateAtomForLazySymbol(emit.pt, lazy_sym)) |atom|
+ coff_file.getAtom(atom).getSymbolIndex().?
+ else |err|
+ return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
+ else if (emit.bin_file.cast(.plan9)) |p9_file|
p9_file.getOrCreateAtomForLazySymbol(emit.pt, lazy_sym) catch |err|
return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
else
diff --git a/src/codegen.zig b/src/codegen.zig
index 9bddc51963..17e3c8504a 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -22,6 +22,8 @@ const Zir = std.zig.Zir;
const Alignment = InternPool.Alignment;
const dev = @import("dev.zig");
+pub const aarch64 = @import("codegen/aarch64.zig");
+
pub const CodeGenError = GenerateSymbolError || error{
/// Indicates the error is already stored in Zcu `failed_codegen`.
CodegenFail,
@@ -48,7 +50,7 @@ fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
return switch (backend) {
.other, .stage1 => unreachable,
- .stage2_aarch64 => unreachable,
+ .stage2_aarch64 => aarch64,
.stage2_arm => unreachable,
.stage2_c => @import("codegen/c.zig"),
.stage2_llvm => @import("codegen/llvm.zig"),
@@ -71,6 +73,7 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
.stage2_c,
.stage2_wasm,
.stage2_x86_64,
+ .stage2_aarch64,
.stage2_x86,
.stage2_riscv64,
.stage2_sparc64,
@@ -82,20 +85,29 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
}
}
+pub fn wantsLiveness(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) bool {
+ const zcu = pt.zcu;
+ const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
+ return switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
+ else => true,
+ .stage2_aarch64 => false,
+ };
+}
+
/// Every code generation backend has a different MIR representation. However, we want to pass
/// MIR from codegen to the linker *regardless* of which backend is in use. So, we use this: a
/// union of all MIR types. The active tag is known from the backend in use; see `AnyMir.tag`.
pub const AnyMir = union {
- riscv64: @import("arch/riscv64/Mir.zig"),
- sparc64: @import("arch/sparc64/Mir.zig"),
- x86_64: @import("arch/x86_64/Mir.zig"),
- wasm: @import("arch/wasm/Mir.zig"),
- c: @import("codegen/c.zig").Mir,
+ aarch64: if (dev.env.supports(.aarch64_backend)) @import("codegen/aarch64/Mir.zig") else noreturn,
+ riscv64: if (dev.env.supports(.riscv64_backend)) @import("arch/riscv64/Mir.zig") else noreturn,
+ sparc64: if (dev.env.supports(.sparc64_backend)) @import("arch/sparc64/Mir.zig") else noreturn,
+ x86_64: if (dev.env.supports(.x86_64_backend)) @import("arch/x86_64/Mir.zig") else noreturn,
+ wasm: if (dev.env.supports(.wasm_backend)) @import("arch/wasm/Mir.zig") else noreturn,
+ c: if (dev.env.supports(.c_backend)) @import("codegen/c.zig").Mir else noreturn,
pub inline fn tag(comptime backend: std.builtin.CompilerBackend) []const u8 {
return switch (backend) {
.stage2_aarch64 => "aarch64",
- .stage2_arm => "arm",
.stage2_riscv64 => "riscv64",
.stage2_sparc64 => "sparc64",
.stage2_x86_64 => "x86_64",
@@ -110,7 +122,8 @@ pub const AnyMir = union {
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
switch (backend) {
else => unreachable,
- inline .stage2_riscv64,
+ inline .stage2_aarch64,
+ .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@@ -131,14 +144,15 @@ pub fn generateFunction(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) CodeGenError!AnyMir {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
- inline .stage2_riscv64,
+ inline .stage2_aarch64,
+ .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@@ -173,7 +187,8 @@ pub fn emitFunction(
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
- inline .stage2_riscv64,
+ inline .stage2_aarch64,
+ .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
=> |backend| {
@@ -420,7 +435,7 @@ pub fn generateSymbol(
const int_tag_ty = ty.intTagType(zcu);
try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent);
},
- .float => |float| switch (float.storage) {
+ .float => |float| storage: switch (float.storage) {
.f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)),
.f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)),
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)),
@@ -429,7 +444,13 @@ pub fn generateSymbol(
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0, abi_size - 10);
},
- .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
+ .f128 => |f128_val| switch (Type.fromInterned(float.ty).floatBits(target)) {
+ else => unreachable,
+ 16 => continue :storage .{ .f16 = @floatCast(f128_val) },
+ 32 => continue :storage .{ .f32 = @floatCast(f128_val) },
+ 64 => continue :storage .{ .f64 = @floatCast(f128_val) },
+ 128 => writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
+ },
},
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0),
.slice => |slice| {
@@ -1218,3 +1239,17 @@ pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
return 0;
}
}
+
+pub fn fieldOffset(ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32, zcu: *Zcu) u64 {
+ const agg_ty = ptr_agg_ty.childType(zcu);
+ return switch (agg_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => agg_ty.structFieldOffset(field_index, zcu),
+ .@"packed" => @divExact(@as(u64, ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset) +
+ (if (zcu.typeToPackedStruct(agg_ty)) |loaded_struct| zcu.structPackedFieldBitOffset(loaded_struct, field_index) else 0) -
+ ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
+ };
+}
+
+test {
+ _ = aarch64;
+}
diff --git a/src/codegen/aarch64.zig b/src/codegen/aarch64.zig
new file mode 100644
index 0000000000..2904d36b7f
--- /dev/null
+++ b/src/codegen/aarch64.zig
@@ -0,0 +1,205 @@
+pub const abi = @import("aarch64/abi.zig");
+pub const Assemble = @import("aarch64/Assemble.zig");
+pub const Disassemble = @import("aarch64/Disassemble.zig");
+pub const encoding = @import("aarch64/encoding.zig");
+pub const Mir = @import("aarch64/Mir.zig");
+pub const Select = @import("aarch64/Select.zig");
+
+pub fn legalizeFeatures(_: *const std.Target) ?*Air.Legalize.Features {
+ return null;
+}
+
+pub fn generate(
+ _: *link.File,
+ pt: Zcu.PerThread,
+ _: Zcu.LazySrcLoc,
+ func_index: InternPool.Index,
+ air: *const Air,
+ liveness: *const ?Air.Liveness,
+) !Mir {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+ const func = zcu.funcInfo(func_index);
+ const func_zir = func.zir_body_inst.resolveFull(ip).?;
+ const file = zcu.fileByIndex(func_zir.file);
+ const named_params_len = file.zir.?.getParamBody(func_zir.inst).len;
+ const func_type = ip.indexToKey(func.ty).func_type;
+ assert(liveness.* == null);
+
+ const mod = zcu.navFileScope(func.owner_nav).mod.?;
+ var isel: Select = .{
+ .pt = pt,
+ .target = &mod.resolved_target.result,
+ .air = air.*,
+ .nav_index = zcu.funcInfo(func_index).owner_nav,
+
+ .def_order = .empty,
+ .blocks = .empty,
+ .loops = .empty,
+ .active_loops = .empty,
+ .loop_live = .{
+ .set = .empty,
+ .list = .empty,
+ },
+ .dom_start = 0,
+ .dom_len = 0,
+ .dom = .empty,
+
+ .saved_registers = comptime .initEmpty(),
+ .instructions = .empty,
+ .literals = .empty,
+ .nav_relocs = .empty,
+ .uav_relocs = .empty,
+ .lazy_relocs = .empty,
+ .global_relocs = .empty,
+ .literal_relocs = .empty,
+
+ .returns = false,
+ .va_list = undefined,
+ .stack_size = 0,
+ .stack_align = .@"16",
+
+ .live_registers = comptime .initFill(.free),
+ .live_values = .empty,
+ .values = .empty,
+ };
+ defer isel.deinit();
+ const is_sysv = !isel.target.os.tag.isDarwin() and isel.target.os.tag != .windows;
+ const is_sysv_var_args = is_sysv and func_type.is_var_args;
+
+ const air_main_body = air.getMainBody();
+ var param_it: Select.CallAbiIterator = .init;
+ const air_args = for (air_main_body, 0..) |air_inst_index, body_index| {
+ if (air.instructions.items(.tag)[@intFromEnum(air_inst_index)] != .arg) break air_main_body[0..body_index];
+ const arg = air.instructions.items(.data)[@intFromEnum(air_inst_index)].arg;
+ const param_ty = arg.ty.toType();
+ const param_vi = param_vi: {
+ if (arg.zir_param_index >= named_params_len) {
+ assert(func_type.is_var_args);
+ if (!is_sysv) break :param_vi try param_it.nonSysvVarArg(&isel, param_ty);
+ }
+ break :param_vi try param_it.param(&isel, param_ty);
+ };
+ tracking_log.debug("${d} <- %{d}", .{ @intFromEnum(param_vi.?), @intFromEnum(air_inst_index) });
+ try isel.live_values.putNoClobber(gpa, air_inst_index, param_vi.?);
+ } else unreachable;
+
+ const saved_gra_start = if (mod.strip) param_it.ngrn else Select.CallAbiIterator.ngrn_start;
+ const saved_gra_end = if (is_sysv_var_args) Select.CallAbiIterator.ngrn_end else param_it.ngrn;
+ const saved_gra_len = @intFromEnum(saved_gra_end) - @intFromEnum(saved_gra_start);
+
+ const saved_vra_start = if (mod.strip) param_it.nsrn else Select.CallAbiIterator.nsrn_start;
+ const saved_vra_end = if (is_sysv_var_args) Select.CallAbiIterator.nsrn_end else param_it.nsrn;
+ const saved_vra_len = @intFromEnum(saved_vra_end) - @intFromEnum(saved_vra_start);
+
+ const frame_record = 2;
+ const named_stack_args: Select.Value.Indirect = .{
+ .base = .fp,
+ .offset = 8 * std.mem.alignForward(u7, frame_record + saved_gra_len, 2),
+ };
+ const stack_var_args = named_stack_args.withOffset(param_it.nsaa);
+ const gr_top = named_stack_args;
+ const vr_top: Select.Value.Indirect = .{ .base = .fp, .offset = 0 };
+ isel.va_list = if (is_sysv) .{ .sysv = .{
+ .__stack = stack_var_args,
+ .__gr_top = gr_top,
+ .__vr_top = vr_top,
+ .__gr_offs = @as(i32, @intFromEnum(Select.CallAbiIterator.ngrn_end) - @intFromEnum(param_it.ngrn)) * -8,
+ .__vr_offs = @as(i32, @intFromEnum(Select.CallAbiIterator.nsrn_end) - @intFromEnum(param_it.nsrn)) * -16,
+ } } else .{ .other = stack_var_args };
+
+ // translate arg locations from caller-based to callee-based
+ for (air_args) |air_inst_index| {
+ assert(air.instructions.items(.tag)[@intFromEnum(air_inst_index)] == .arg);
+ const arg_vi = isel.live_values.get(air_inst_index).?;
+ const passed_vi = switch (arg_vi.parent(&isel)) {
+ .unallocated, .stack_slot => arg_vi,
+ .value, .constant => unreachable,
+ .address => |address_vi| address_vi,
+ };
+ switch (passed_vi.parent(&isel)) {
+ .unallocated => if (!mod.strip) {
+ var part_it = passed_vi.parts(&isel);
+ const first_passed_part_vi = part_it.next().?;
+ const hint_ra = first_passed_part_vi.hint(&isel).?;
+ passed_vi.setParent(&isel, .{ .stack_slot = if (hint_ra.isVector())
+ vr_top.withOffset(@as(i8, -16) * (@intFromEnum(saved_vra_end) - @intFromEnum(hint_ra)))
+ else
+ gr_top.withOffset(@as(i8, -8) * (@intFromEnum(saved_gra_end) - @intFromEnum(hint_ra))) });
+ },
+ .stack_slot => |stack_slot| {
+ assert(stack_slot.base == .sp);
+ passed_vi.changeStackSlot(&isel, named_stack_args.withOffset(stack_slot.offset));
+ },
+ .address, .value, .constant => unreachable,
+ }
+ }
+
+ ret: {
+ var ret_it: Select.CallAbiIterator = .init;
+ const ret_vi = try ret_it.ret(&isel, .fromInterned(func_type.return_type)) orelse break :ret;
+ tracking_log.debug("${d} <- %main", .{@intFromEnum(ret_vi)});
+ try isel.live_values.putNoClobber(gpa, Select.Block.main, ret_vi);
+ }
+
+ assert(!(try isel.blocks.getOrPut(gpa, Select.Block.main)).found_existing);
+ try isel.analyze(air_main_body);
+ try isel.finishAnalysis();
+ isel.verify(false);
+
+ isel.blocks.values()[0] = .{
+ .live_registers = isel.live_registers,
+ .target_label = @intCast(isel.instructions.items.len),
+ };
+ try isel.body(air_main_body);
+ if (isel.live_values.fetchRemove(Select.Block.main)) |ret_vi| {
+ switch (ret_vi.value.parent(&isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => |address_vi| try address_vi.liveIn(
+ &isel,
+ address_vi.hint(&isel).?,
+ comptime &.initFill(.free),
+ ),
+ }
+ ret_vi.value.deref(&isel);
+ }
+ isel.verify(true);
+
+ const prologue = isel.instructions.items.len;
+ const epilogue = try isel.layout(param_it, is_sysv_var_args, saved_gra_len, saved_vra_len, mod);
+
+ const instructions = try isel.instructions.toOwnedSlice(gpa);
+ var mir: Mir = .{
+ .prologue = instructions[prologue..epilogue],
+ .body = instructions[0..prologue],
+ .epilogue = instructions[epilogue..],
+ .literals = &.{},
+ .nav_relocs = &.{},
+ .uav_relocs = &.{},
+ .lazy_relocs = &.{},
+ .global_relocs = &.{},
+ .literal_relocs = &.{},
+ };
+ errdefer mir.deinit(gpa);
+ mir.literals = try isel.literals.toOwnedSlice(gpa);
+ mir.nav_relocs = try isel.nav_relocs.toOwnedSlice(gpa);
+ mir.uav_relocs = try isel.uav_relocs.toOwnedSlice(gpa);
+ mir.lazy_relocs = try isel.lazy_relocs.toOwnedSlice(gpa);
+ mir.global_relocs = try isel.global_relocs.toOwnedSlice(gpa);
+ mir.literal_relocs = try isel.literal_relocs.toOwnedSlice(gpa);
+ return mir;
+}
+
+test {
+ _ = Assemble;
+}
+
+const Air = @import("../Air.zig");
+const assert = std.debug.assert;
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const std = @import("std");
+const tracking_log = std.log.scoped(.tracking);
+const Zcu = @import("../Zcu.zig");
diff --git a/src/codegen/aarch64/Assemble.zig b/src/codegen/aarch64/Assemble.zig
new file mode 100644
index 0000000000..494e012d80
--- /dev/null
+++ b/src/codegen/aarch64/Assemble.zig
@@ -0,0 +1,1682 @@
+source: [*:0]const u8,
+operands: std.StringHashMapUnmanaged(Operand),
+
+pub const Operand = union(enum) {
+ register: aarch64.encoding.Register,
+};
+
+pub fn nextInstruction(as: *Assemble) !?Instruction {
+ @setEvalBranchQuota(42_000);
+ comptime var ct_token_buf: [token_buf_len]u8 = undefined;
+ var token_buf: [token_buf_len]u8 = undefined;
+ const original_source = while (true) {
+ const original_source = as.source;
+ const source_token = try as.nextToken(&token_buf, .{});
+ switch (source_token.len) {
+ 0 => return null,
+ else => switch (source_token[0]) {
+ else => break original_source,
+ '\n', ';' => {},
+ },
+ }
+ };
+ log.debug(
+ \\.
+ \\=========================
+ \\= Assembling "{f}"
+ \\=========================
+ \\
+ , .{std.zig.fmtString(std.mem.span(original_source))});
+ inline for (instructions) |instruction| {
+ next_pattern: {
+ as.source = original_source;
+ var symbols: Symbols: {
+ const symbols = @typeInfo(@TypeOf(instruction.symbols)).@"struct".fields;
+ var symbol_fields: [symbols.len]std.builtin.Type.StructField = undefined;
+ for (&symbol_fields, symbols) |*symbol_field, symbol| symbol_field.* = .{
+ .name = symbol.name,
+ .type = zonCast(SymbolSpec, @field(instruction.symbols, symbol.name), .{}).Storage(),
+ .default_value_ptr = null,
+ .is_comptime = false,
+ .alignment = 0,
+ };
+ break :Symbols @Type(.{ .@"struct" = .{
+ .layout = .auto,
+ .fields = &symbol_fields,
+ .decls = &.{},
+ .is_tuple = false,
+ } });
+ } = undefined;
+ comptime var pattern_as: Assemble = .{ .source = instruction.pattern, .operands = undefined };
+ inline while (true) {
+ const pattern_token = comptime pattern_as.nextToken(&ct_token_buf, .{ .placeholders = true }) catch |err|
+ @compileError(@errorName(err) ++ " while parsing '" ++ instruction.pattern ++ "'");
+ const source_token = try as.nextToken(&token_buf, .{ .operands = true });
+ log.debug("\"{f}\" -> \"{f}\"", .{
+ std.zig.fmtString(pattern_token),
+ std.zig.fmtString(source_token),
+ });
+ if (pattern_token.len == 0) {
+ switch (source_token.len) {
+ 0 => {},
+ else => switch (source_token[0]) {
+ else => break :next_pattern,
+ '\n', ';' => {},
+ },
+ }
+ const encode = @field(Instruction, @tagName(instruction.encode[0]));
+ const Encode = @TypeOf(encode);
+ var args: std.meta.ArgsTuple(Encode) = undefined;
+ inline for (&args, @typeInfo(Encode).@"fn".params, 1..instruction.encode.len) |*arg, param, encode_index|
+ arg.* = zonCast(param.type.?, instruction.encode[encode_index], symbols);
+ return @call(.auto, encode, args);
+ } else if (pattern_token[0] == '<') {
+ const symbol_name = comptime pattern_token[1 .. std.mem.indexOfScalarPos(u8, pattern_token, 1, '|') orelse
+ pattern_token.len - 1];
+ const symbol = &@field(symbols, symbol_name);
+ symbol.* = zonCast(SymbolSpec, @field(instruction.symbols, symbol_name), .{}).parse(source_token) orelse break :next_pattern;
+ log.debug("{s} = {any}", .{ symbol_name, symbol.* });
+ } else if (!toUpperEqlAssertUpper(source_token, pattern_token)) break :next_pattern;
+ }
+ }
+ log.debug("'{s}' not matched...", .{instruction.pattern});
+ }
+ as.source = original_source;
+ log.debug("Nothing matched!\n", .{});
+ return error.InvalidSyntax;
+}
+
+fn zonCast(comptime Result: type, zon_value: anytype, symbols: anytype) Result {
+ const ZonValue = @TypeOf(zon_value);
+ const Symbols = @TypeOf(symbols);
+ switch (@typeInfo(ZonValue)) {
+ .void, .bool, .int, .float, .pointer, .comptime_float, .comptime_int, .@"enum" => return zon_value,
+ .@"struct" => |zon_struct| switch (@typeInfo(Result)) {
+ .@"struct" => |result_struct| {
+ comptime var used_zon_fields = 0;
+ var result: Result = undefined;
+ inline for (result_struct.fields) |result_field| @field(result, result_field.name) = if (@hasField(ZonValue, result_field.name)) result: {
+ used_zon_fields += 1;
+ break :result zonCast(@FieldType(Result, result_field.name), @field(zon_value, result_field.name), symbols);
+ } else result_field.defaultValue() orelse @compileError(std.fmt.comptimePrint("missing zon field '{s}': {} <- {any}", .{ result_field.name, Result, zon_value }));
+ if (used_zon_fields != zon_struct.fields.len) @compileError(std.fmt.comptimePrint("unused zon field: {} <- {any}", .{ Result, zon_value }));
+ return result;
+ },
+ .@"union" => {
+ if (zon_struct.fields.len != 1) @compileError(std.fmt.comptimePrint("{} <- {any}", .{ Result, zon_value }));
+ const field_name = zon_struct.fields[0].name;
+ return @unionInit(
+ Result,
+ field_name,
+ zonCast(@FieldType(Result, field_name), @field(zon_value, field_name), symbols),
+ );
+ },
+ else => @compileError(std.fmt.comptimePrint("unsupported zon type: {} <- {any}", .{ Result, zon_value })),
+ },
+ .enum_literal => if (@hasField(Symbols, @tagName(zon_value))) {
+ const symbol = @field(symbols, @tagName(zon_value));
+ const Symbol = @TypeOf(symbol);
+ switch (@typeInfo(Result)) {
+ .@"enum" => switch (@typeInfo(Symbol)) {
+ .int => |symbol_int| {
+ var buf: [
+ std.fmt.count("{d}", .{switch (symbol_int.signedness) {
+ .signed => std.math.minInt(Symbol),
+ .unsigned => std.math.maxInt(Symbol),
+ }})
+ ]u8 = undefined;
+ return std.meta.stringToEnum(Result, std.fmt.bufPrint(&buf, "{d}", .{symbol}) catch unreachable).?;
+ },
+ else => return symbol,
+ },
+ else => return symbol,
+ }
+ } else return if (@hasDecl(Result, @tagName(zon_value))) @field(Result, @tagName(zon_value)) else zon_value,
+ else => @compileError(std.fmt.comptimePrint("unsupported zon type: {} <- {any}", .{ Result, zon_value })),
+ }
+}
+
+fn toUpperEqlAssertUpper(lhs: []const u8, rhs: []const u8) bool {
+ if (lhs.len != rhs.len) return false;
+ for (lhs, rhs) |l, r| {
+ assert(!std.ascii.isLower(r));
+ if (std.ascii.toUpper(l) != r) return false;
+ }
+ return true;
+}
+
+const token_buf_len = "v31.b[15]".len;
+fn nextToken(as: *Assemble, buf: *[token_buf_len]u8, comptime opts: struct {
+ operands: bool = false,
+ placeholders: bool = false,
+}) ![]const u8 {
+ const invalid_syntax: u8 = 1;
+ while (true) c: switch (as.source[0]) {
+ 0 => return as.source[0..0],
+ '\t', '\n' + 1...'\r', ' ' => as.source = as.source[1..],
+ '\n', '!', '#', ',', ';', '[', ']' => {
+ defer as.source = as.source[1..];
+ return as.source[0..1];
+ },
+ '%' => if (opts.operands) {
+ if (as.source[1] != '[') continue :c invalid_syntax;
+ const name_start: usize = 2;
+ var index = name_start;
+ while (switch (as.source[index]) {
+ else => true,
+ ':', ']' => false,
+ }) index += 1;
+ const operand = as.operands.get(as.source[name_start..index]) orelse continue :c invalid_syntax;
+ const modifier = modifier: switch (as.source[index]) {
+ else => unreachable,
+ ':' => {
+ index += 1;
+ const modifier_start = index;
+ while (switch (as.source[index]) {
+ else => true,
+ ']' => false,
+ }) index += 1;
+ break :modifier as.source[modifier_start..index];
+ },
+ ']' => "",
+ };
+ assert(as.source[index] == ']');
+ const modified_operand: Operand = if (std.mem.eql(u8, modifier, ""))
+ operand
+ else if (std.mem.eql(u8, modifier, "w")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.w() },
+ } else if (std.mem.eql(u8, modifier, "x")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.x() },
+ } else if (std.mem.eql(u8, modifier, "b")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.b() },
+ } else if (std.mem.eql(u8, modifier, "h")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.h() },
+ } else if (std.mem.eql(u8, modifier, "s")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.s() },
+ } else if (std.mem.eql(u8, modifier, "d")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.d() },
+ } else if (std.mem.eql(u8, modifier, "q")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.q() },
+ } else if (std.mem.eql(u8, modifier, "Z")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.z() },
+ } else continue :c invalid_syntax;
+ switch (modified_operand) {
+ .register => |reg| {
+ as.source = as.source[index + 1 ..];
+ return std.fmt.bufPrint(buf, "{f}", .{reg.fmt()}) catch unreachable;
+ },
+ }
+ } else continue :c invalid_syntax,
+ '-', '0'...'9', 'A'...'Z', '_', 'a'...'z' => {
+ var index: usize = 1;
+ while (switch (as.source[index]) {
+ '0'...'9', 'A'...'Z', '_', 'a'...'z' => true,
+ else => false,
+ }) index += 1;
+ defer as.source = as.source[index..];
+ return as.source[0..index];
+ },
+ '<' => if (opts.placeholders) {
+ var index: usize = 1;
+ while (switch (as.source[index]) {
+ 0 => return error.UnterminatedPlaceholder,
+ '>' => false,
+ else => true,
+ }) index += 1;
+ defer as.source = as.source[index + 1 ..];
+ return as.source[0 .. index + 1];
+ } else continue :c invalid_syntax,
+ else => {
+ if (!@inComptime()) log.debug("invalid token \"{f}\"", .{std.zig.fmtString(std.mem.span(as.source))});
+ return error.InvalidSyntax;
+ },
+ };
+}
+
/// Describes what kind of operand a symbolic token in an assembly template is
/// allowed to be, along with per-kind validation constraints.
const SymbolSpec = union(enum) {
    /// A general-purpose/SIMD register constrained to `format`. When `allow_sp`
    /// is set, register 31 parses as sp and zr is rejected; otherwise the reverse.
    reg: struct { format: aarch64.encoding.Register.Format, allow_sp: bool = false },
    /// A system register (mrs/msr operand).
    systemreg,
    /// An integer immediate with optional alignment (`multiple_of`) and upper
    /// bound (`max_valid`) constraints.
    imm: struct {
        type: std.builtin.Type.Int,
        multiple_of: comptime_int = 1,
        max_valid: ?comptime_int = null,
    },
    /// A register extend operand (uxtb/sxtw/...) whose source size must match `size`.
    extend: struct { size: aarch64.encoding.Register.IntegerSize },
    /// A shift operand (lsl/lsr/asr, plus ror unless `allow_ror` is false).
    shift: struct { allow_ror: bool = true },
    /// A memory barrier option; `only_sy` restricts it to sy.
    barrier: struct { only_sy: bool = false },

    /// The type that holds a successfully parsed operand for `spec`.
    fn Storage(comptime spec: SymbolSpec) type {
        return switch (spec) {
            .reg => aarch64.encoding.Register,
            .systemreg => aarch64.encoding.Register.System,
            .imm => |imm| @Type(.{ .int = imm.type }),
            .extend => Instruction.DataProcessingRegister.AddSubtractExtendedRegister.Option,
            .shift => Instruction.DataProcessingRegister.Shift.Op,
            .barrier => Instruction.BranchExceptionGeneratingSystem.Barriers.Option,
        };
    }

    /// Case-insensitively matches `token` against the field names of enum `E`.
    /// The scratch buffer is one byte longer than the longest field name, so an
    /// over-long token is truncated into a string that can never match.
    fn parseEnumIgnoreCase(comptime E: type, token: []const u8) ?E {
        var buf: [
            max_len: {
                var max_len = 0;
                for (@typeInfo(E).@"enum".fields) |field| max_len = @max(max_len, field.name.len);
                break :max_len max_len;
            } + 1
        ]u8 = undefined;
        return std.meta.stringToEnum(E, std.ascii.lowerString(
            &buf,
            token[0..@min(token.len, buf.len)],
        ));
    }

    /// Parses `token` according to `spec`. Returns null (after logging a debug
    /// message) when the token is malformed or violates a constraint of `spec`.
    fn parse(comptime spec: SymbolSpec, token: []const u8) ?Storage(spec) {
        const Result = Storage(spec);
        switch (spec) {
            .reg => |reg_spec| {
                const reg = Result.parse(token) orelse {
                    log.debug("invalid register: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                };
                if (reg.format.integer != reg_spec.format.integer) {
                    log.debug("invalid register size: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                }
                // When sp is allowed zr is not, and vice versa.
                if (reg.alias == if (reg_spec.allow_sp) .zr else .sp) {
                    log.debug("invalid register usage: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                }
                return reg;
            },
            .systemreg => {
                const systemreg = Result.parse(token) orelse {
                    log.debug("invalid system register: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                };
                assert(systemreg.op0 >= 2);
                return systemreg;
            },
            .imm => |imm_spec| {
                // Base 0 accepts decimal, 0x hex, 0o octal, and 0b binary literals.
                const imm = std.fmt.parseInt(Result, token, 0) catch {
                    log.debug("invalid immediate: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                };
                if (@rem(imm, imm_spec.multiple_of) != 0) {
                    log.debug("invalid immediate usage: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                }
                if (imm_spec.max_valid) |max_valid| if (imm > max_valid) {
                    log.debug("out of range immediate: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                };
                return imm;
            },
            .extend => |extend_spec| {
                const extend = parseEnumIgnoreCase(Result, token) orelse {
                    log.debug("invalid extend: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                };
                if (extend.sf() != extend_spec.size) {
                    log.debug("invalid extend: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                }
                return extend;
            },
            .shift => |shift_spec| {
                const shift = parseEnumIgnoreCase(Result, token) orelse {
                    log.debug("invalid shift: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                };
                if (!shift_spec.allow_ror and shift == .ror) {
                    log.debug("invalid shift usage: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                }
                return shift;
            },
            .barrier => |barrier_spec| {
                const barrier = parseEnumIgnoreCase(Result, token) orelse {
                    log.debug("invalid barrier: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                };
                if (barrier_spec.only_sy and barrier != .sy) {
                    log.debug("invalid barrier: \"{f}\"", .{std.zig.fmtString(token)});
                    return null;
                }
                return barrier;
            },
        }
    }
};
+
test "add sub" {
    var as: Assemble = .{
        .source =
        \\ add w0, w0, w1
        \\ add w2, w3, w4
        \\ add wsp, w5, w6
        \\ add w7, wsp, w8
        \\ add wsp, wsp, w9
        \\ add w10, w10, wzr
        \\ add w11, w12, wzr
        \\ add wsp, w13, wzr
        \\ add w14, wsp, wzr
        \\ add wsp, wsp, wzr
        \\
        \\ add x0, x0, x1
        \\ add x2, x3, x4
        \\ add sp, x5, x6
        \\ add x7, sp, x8
        \\ add sp, sp, x9
        \\ add x10, x10, xzr
        \\ add x11, x12, xzr
        \\ add sp, x13, xzr
        \\ add x14, sp, xzr
        \\ add sp, sp, xzr
        \\
        \\ add w0, w0, w1
        \\ add w2, w3, w4, uxtb #0
        \\ add wsp, w5, w6, uxth #1
        \\ add w7, wsp, w8, uxtw #0
        \\ add wsp, wsp, w9, uxtw #2
        \\ add w10, w10, wzr, uxtw #3
        \\ add w11, w12, wzr, sxtb #4
        \\ add wsp, w13, wzr, sxth #0
        \\ add w14, wsp, wzr, sxtw #1
        \\ add wsp, wsp, wzr, sxtw #2
        \\
        \\ add x0, x0, x1
        \\ add x2, x3, w4, uxtb #0
        \\ add sp, x5, w6, uxth #1
        \\ add x7, sp, w8, uxtw #2
        \\ add sp, sp, x9, uxtx #0
        \\ add x10, x10, xzr, uxtx #3
        \\ add x11, x12, wzr, sxtb #4
        \\ add sp, x13, wzr, sxth #0
        \\ add x14, sp, wzr, sxtw #1
        \\ add sp, sp, xzr, sxtx #2
        \\
        \\ add w0, w0, #0
        \\ add w0, w1, #1, lsl #0
        \\ add wsp, w2, #2, lsl #12
        \\ add w3, wsp, #3, lsl #0
        \\ add wsp, wsp, #4095, lsl #12
        \\ add w0, w1, #0
        \\ add w2, w3, #0, lsl #0
        \\ add w4, wsp, #0
        \\ add w5, wsp, #0, lsl #0
        \\ add wsp, w6, #0
        \\ add wsp, w7, #0, lsl #0
        \\ add wsp, wsp, #0
        \\ add wsp, wsp, #0, lsl #0
        \\
        \\ add x0, x0, #0
        \\ add x0, x1, #1, lsl #0
        \\ add sp, x2, #2, lsl #12
        \\ add x3, sp, #3, lsl #0
        \\ add sp, sp, #4095, lsl #12
        \\ add x0, x1, #0
        \\ add x2, x3, #0, lsl #0
        \\ add x4, sp, #0
        \\ add x5, sp, #0, lsl #0
        \\ add sp, x6, #0
        \\ add sp, x7, #0, lsl #0
        \\ add sp, sp, #0
        \\ add sp, sp, #0, lsl #0
        \\
        \\ add w0, w0, w0
        \\ add w1, w1, w2, lsl #0
        \\ add w3, w4, w5, lsl #1
        \\ add w6, w6, wzr, lsl #31
        \\ add w7, wzr, w8, lsr #0
        \\ add w9, wzr, wzr, lsr #30
        \\ add wzr, w10, w11, lsr #31
        \\ add wzr, w12, wzr, asr #0x0
        \\ add wzr, wzr, w13, asr #0x10
        \\ add wzr, wzr, wzr, asr #0x1f
        \\
        \\ add x0, x0, x0
        \\ add x1, x1, x2, lsl #0
        \\ add x3, x4, x5, lsl #1
        \\ add x6, x6, xzr, lsl #63
        \\ add x7, xzr, x8, lsr #0
        \\ add x9, xzr, xzr, lsr #62
        \\ add xzr, x10, x11, lsr #63
        \\ add xzr, x12, xzr, asr #0x0
        \\ add xzr, xzr, x13, asr #0x1F
        \\ add xzr, xzr, xzr, asr #0x3f
        \\
        \\ sub w0, w0, w1
        \\ sub w2, w3, w4
        \\ sub wsp, w5, w6
        \\ sub w7, wsp, w8
        \\ sub wsp, wsp, w9
        \\ sub w10, w10, wzr
        \\ sub w11, w12, wzr
        \\ sub wsp, w13, wzr
        \\ sub w14, wsp, wzr
        \\ sub wsp, wsp, wzr
        \\
        \\ sub x0, x0, x1
        \\ sub x2, x3, x4
        \\ sub sp, x5, x6
        \\ sub x7, sp, x8
        \\ sub sp, sp, x9
        \\ sub x10, x10, xzr
        \\ sub x11, x12, xzr
        \\ sub sp, x13, xzr
        \\ sub x14, sp, xzr
        \\ sub sp, sp, xzr
        \\
        \\ sub w0, w0, w1
        \\ sub w2, w3, w4, uxtb #0
        \\ sub wsp, w5, w6, uxth #1
        \\ sub w7, wsp, w8, uxtw #0
        \\ sub wsp, wsp, w9, uxtw #2
        \\ sub w10, w10, wzr, uxtw #3
        \\ sub w11, w12, wzr, sxtb #4
        \\ sub wsp, w13, wzr, sxth #0
        \\ sub w14, wsp, wzr, sxtw #1
        \\ sub wsp, wsp, wzr, sxtw #2
        \\
        \\ sub x0, x0, x1
        \\ sub x2, x3, w4, uxtb #0
        \\ sub sp, x5, w6, uxth #1
        \\ sub x7, sp, w8, uxtw #2
        \\ sub sp, sp, x9, uxtx #0
        \\ sub x10, x10, xzr, uxtx #3
        \\ sub x11, x12, wzr, sxtb #4
        \\ sub sp, x13, wzr, sxth #0
        \\ sub x14, sp, wzr, sxtw #1
        \\ sub sp, sp, xzr, sxtx #2
        \\
        \\ sub w0, w0, #0
        \\ sub w0, w1, #1, lsl #0
        \\ sub wsp, w2, #2, lsl #12
        \\ sub w3, wsp, #3, lsl #0
        \\ sub wsp, wsp, #4095, lsl #12
        \\ sub w0, w1, #0
        \\ sub w2, w3, #0, lsl #0
        \\ sub w4, wsp, #0
        \\ sub w5, wsp, #0, lsl #0
        \\ sub wsp, w6, #0
        \\ sub wsp, w7, #0, lsl #0
        \\ sub wsp, wsp, #0
        \\ sub wsp, wsp, #0, lsl #0
        \\
        \\ sub x0, x0, #0
        \\ sub x0, x1, #1, lsl #0
        \\ sub sp, x2, #2, lsl #12
        \\ sub x3, sp, #3, lsl #0
        \\ sub sp, sp, #4095, lsl #12
        \\ sub x0, x1, #0
        \\ sub x2, x3, #0, lsl #0
        \\ sub x4, sp, #0
        \\ sub x5, sp, #0, lsl #0
        \\ sub sp, x6, #0
        \\ sub sp, x7, #0, lsl #0
        \\ sub sp, sp, #0
        \\ sub sp, sp, #0, lsl #0
        \\
        \\ sub w0, w0, w0
        \\ sub w1, w1, w2, lsl #0
        \\ sub w3, w4, w5, lsl #1
        \\ sub w6, w6, wzr, lsl #31
        \\ sub w7, wzr, w8, lsr #0
        \\ sub w9, wzr, wzr, lsr #30
        \\ sub wzr, w10, w11, lsr #31
        \\ sub wzr, w12, wzr, asr #0x0
        \\ sub wzr, wzr, w13, asr #0x10
        \\ sub wzr, wzr, wzr, asr #0x1f
        \\
        \\ sub x0, x0, x0
        \\ sub x1, x1, x2, lsl #0
        \\ sub x3, x4, x5, lsl #1
        \\ sub x6, x6, xzr, lsl #63
        \\ sub x7, xzr, x8, lsr #0
        \\ sub x9, xzr, xzr, lsr #62
        \\ sub xzr, x10, x11, lsr #63
        \\ sub xzr, x12, xzr, asr #0x0
        \\ sub xzr, xzr, x13, asr #0x1F
        \\ sub xzr, xzr, xzr, asr #0x3f
        \\
        \\ neg w0, w0
        \\ neg w1, w2, lsl #0
        \\ neg w3, wzr, lsl #7
        \\ neg wzr, w4, lsr #14
        \\ neg wzr, wzr, asr #21
        \\
        \\ neg x0, x0
        \\ neg x1, x2, lsl #0
        \\ neg x3, xzr, lsl #11
        \\ neg xzr, x4, lsr #22
        \\ neg xzr, xzr, asr #33
        ,
        .operands = .empty,
    };

    // Canonical disassembly expected for each source line, in order. Note the
    // aliasing cases: zero-immediate add to/from sp renders as mov, and
    // sub from zr renders as neg.
    const expected_disasms = [_][]const u8{
        "add w0, w0, w1",
        "add w2, w3, w4",
        "add wsp, w5, w6",
        "add w7, wsp, w8",
        "add wsp, wsp, w9",
        "add w10, w10, wzr",
        "add w11, w12, wzr",
        "add wsp, w13, wzr",
        "add w14, wsp, wzr",
        "add wsp, wsp, wzr",

        "add x0, x0, x1",
        "add x2, x3, x4",
        "add sp, x5, x6",
        "add x7, sp, x8",
        "add sp, sp, x9",
        "add x10, x10, xzr",
        "add x11, x12, xzr",
        "add sp, x13, xzr",
        "add x14, sp, xzr",
        "add sp, sp, xzr",

        "add w0, w0, w1",
        "add w2, w3, w4, uxtb #0",
        "add wsp, w5, w6, uxth #1",
        "add w7, wsp, w8",
        "add wsp, wsp, w9, uxtw #2",
        "add w10, w10, wzr, uxtw #3",
        "add w11, w12, wzr, sxtb #4",
        "add wsp, w13, wzr, sxth #0",
        "add w14, wsp, wzr, sxtw #1",
        "add wsp, wsp, wzr, sxtw #2",

        "add x0, x0, x1",
        "add x2, x3, w4, uxtb #0",
        "add sp, x5, w6, uxth #1",
        "add x7, sp, w8, uxtw #2",
        "add sp, sp, x9",
        "add x10, x10, xzr, uxtx #3",
        "add x11, x12, wzr, sxtb #4",
        "add sp, x13, wzr, sxth #0",
        "add x14, sp, wzr, sxtw #1",
        "add sp, sp, xzr, sxtx #2",

        "add w0, w0, #0x0",
        "add w0, w1, #0x1",
        "add wsp, w2, #0x2, lsl #12",
        "add w3, wsp, #0x3",
        "add wsp, wsp, #0xfff, lsl #12",
        "add w0, w1, #0x0",
        "add w2, w3, #0x0",
        "mov w4, wsp",
        "mov w5, wsp",
        "mov wsp, w6",
        "mov wsp, w7",
        "mov wsp, wsp",
        "mov wsp, wsp",

        "add x0, x0, #0x0",
        "add x0, x1, #0x1",
        "add sp, x2, #0x2, lsl #12",
        "add x3, sp, #0x3",
        "add sp, sp, #0xfff, lsl #12",
        "add x0, x1, #0x0",
        "add x2, x3, #0x0",
        "mov x4, sp",
        "mov x5, sp",
        "mov sp, x6",
        "mov sp, x7",
        "mov sp, sp",
        "mov sp, sp",

        "add w0, w0, w0",
        "add w1, w1, w2",
        "add w3, w4, w5, lsl #1",
        "add w6, w6, wzr, lsl #31",
        "add w7, wzr, w8, lsr #0",
        "add w9, wzr, wzr, lsr #30",
        "add wzr, w10, w11, lsr #31",
        "add wzr, w12, wzr, asr #0",
        "add wzr, wzr, w13, asr #16",
        "add wzr, wzr, wzr, asr #31",

        "add x0, x0, x0",
        "add x1, x1, x2",
        "add x3, x4, x5, lsl #1",
        "add x6, x6, xzr, lsl #63",
        "add x7, xzr, x8, lsr #0",
        "add x9, xzr, xzr, lsr #62",
        "add xzr, x10, x11, lsr #63",
        "add xzr, x12, xzr, asr #0",
        "add xzr, xzr, x13, asr #31",
        "add xzr, xzr, xzr, asr #63",

        "sub w0, w0, w1",
        "sub w2, w3, w4",
        "sub wsp, w5, w6",
        "sub w7, wsp, w8",
        "sub wsp, wsp, w9",
        "sub w10, w10, wzr",
        "sub w11, w12, wzr",
        "sub wsp, w13, wzr",
        "sub w14, wsp, wzr",
        "sub wsp, wsp, wzr",

        "sub x0, x0, x1",
        "sub x2, x3, x4",
        "sub sp, x5, x6",
        "sub x7, sp, x8",
        "sub sp, sp, x9",
        "sub x10, x10, xzr",
        "sub x11, x12, xzr",
        "sub sp, x13, xzr",
        "sub x14, sp, xzr",
        "sub sp, sp, xzr",

        "sub w0, w0, w1",
        "sub w2, w3, w4, uxtb #0",
        "sub wsp, w5, w6, uxth #1",
        "sub w7, wsp, w8",
        "sub wsp, wsp, w9, uxtw #2",
        "sub w10, w10, wzr, uxtw #3",
        "sub w11, w12, wzr, sxtb #4",
        "sub wsp, w13, wzr, sxth #0",
        "sub w14, wsp, wzr, sxtw #1",
        "sub wsp, wsp, wzr, sxtw #2",

        "sub x0, x0, x1",
        "sub x2, x3, w4, uxtb #0",
        "sub sp, x5, w6, uxth #1",
        "sub x7, sp, w8, uxtw #2",
        "sub sp, sp, x9",
        "sub x10, x10, xzr, uxtx #3",
        "sub x11, x12, wzr, sxtb #4",
        "sub sp, x13, wzr, sxth #0",
        "sub x14, sp, wzr, sxtw #1",
        "sub sp, sp, xzr, sxtx #2",

        "sub w0, w0, #0x0",
        "sub w0, w1, #0x1",
        "sub wsp, w2, #0x2, lsl #12",
        "sub w3, wsp, #0x3",
        "sub wsp, wsp, #0xfff, lsl #12",
        "sub w0, w1, #0x0",
        "sub w2, w3, #0x0",
        "sub w4, wsp, #0x0",
        "sub w5, wsp, #0x0",
        "sub wsp, w6, #0x0",
        "sub wsp, w7, #0x0",
        "sub wsp, wsp, #0x0",
        "sub wsp, wsp, #0x0",

        "sub x0, x0, #0x0",
        "sub x0, x1, #0x1",
        "sub sp, x2, #0x2, lsl #12",
        "sub x3, sp, #0x3",
        "sub sp, sp, #0xfff, lsl #12",
        "sub x0, x1, #0x0",
        "sub x2, x3, #0x0",
        "sub x4, sp, #0x0",
        "sub x5, sp, #0x0",
        "sub sp, x6, #0x0",
        "sub sp, x7, #0x0",
        "sub sp, sp, #0x0",
        "sub sp, sp, #0x0",

        "sub w0, w0, w0",
        "sub w1, w1, w2",
        "sub w3, w4, w5, lsl #1",
        "sub w6, w6, wzr, lsl #31",
        "neg w7, w8, lsr #0",
        "neg w9, wzr, lsr #30",
        "sub wzr, w10, w11, lsr #31",
        "sub wzr, w12, wzr, asr #0",
        "neg wzr, w13, asr #16",
        "neg wzr, wzr, asr #31",

        "sub x0, x0, x0",
        "sub x1, x1, x2",
        "sub x3, x4, x5, lsl #1",
        "sub x6, x6, xzr, lsl #63",
        "neg x7, x8, lsr #0",
        "neg x9, xzr, lsr #62",
        "sub xzr, x10, x11, lsr #63",
        "sub xzr, x12, xzr, asr #0",
        "neg xzr, x13, asr #31",
        "neg xzr, xzr, asr #63",

        "neg w0, w0",
        "neg w1, w2",
        "neg w3, wzr, lsl #7",
        "neg wzr, w4, lsr #14",
        "neg wzr, wzr, asr #21",

        "neg x0, x0",
        "neg x1, x2",
        "neg x3, xzr, lsl #11",
        "neg xzr, x4, lsr #22",
        "neg xzr, xzr, asr #33",
    };
    for (expected_disasms) |disasm|
        try std.testing.expectFmt(disasm, "{f}", .{(try as.nextInstruction()).?});

    // The assembler must be exhausted after the last instruction.
    try std.testing.expect(null == try as.nextInstruction());
}
test "bitfield" {
    var as: Assemble = .{
        .source =
        \\sbfm w0, w0, #0, #31
        \\sbfm w0, w0, #31, #0
        \\
        \\sbfm x0, x0, #0, #63
        \\sbfm x0, x0, #63, #0
        \\
        \\bfm w0, w0, #0, #31
        \\bfm w0, w0, #31, #0
        \\
        \\bfm x0, x0, #0, #63
        \\bfm x0, x0, #63, #0
        \\
        \\ubfm w0, w0, #0, #31
        \\ubfm w0, w0, #31, #0
        \\
        \\ubfm x0, x0, #0, #63
        \\ubfm x0, x0, #63, #0
        ,
        .operands = .empty,
    };

    // Every bitfield-move line round-trips through the assembler unchanged.
    const expected_disasms = [_][]const u8{
        "sbfm w0, w0, #0, #31",
        "sbfm w0, w0, #31, #0",
        "sbfm x0, x0, #0, #63",
        "sbfm x0, x0, #63, #0",
        "bfm w0, w0, #0, #31",
        "bfm w0, w0, #31, #0",
        "bfm x0, x0, #0, #63",
        "bfm x0, x0, #63, #0",
        "ubfm w0, w0, #0, #31",
        "ubfm w0, w0, #31, #0",
        "ubfm x0, x0, #0, #63",
        "ubfm x0, x0, #63, #0",
    };
    for (expected_disasms) |disasm|
        try std.testing.expectFmt(disasm, "{f}", .{(try as.nextInstruction()).?});

    try std.testing.expect(null == try as.nextInstruction());
}
test "branch register" {
    var as: Assemble = .{
        .source =
        \\ret
        \\br x30
        \\blr x30
        \\ret x30
        \\br x29
        \\blr x29
        \\ret x29
        \\br x2
        \\blr x1
        \\ret x0
        ,
        .operands = .empty,
    };

    // `ret x30` prints as bare `ret` (x30 is its implicit operand); every
    // other register is rendered explicitly.
    const expected_disasms = [_][]const u8{
        "ret",
        "br x30",
        "blr x30",
        "ret",
        "br x29",
        "blr x29",
        "ret x29",
        "br x2",
        "blr x1",
        "ret x0",
    };
    for (expected_disasms) |disasm|
        try std.testing.expectFmt(disasm, "{f}", .{(try as.nextInstruction()).?});

    try std.testing.expect(null == try as.nextInstruction());
}
test "exception generating" {
    var as: Assemble = .{
        .source =
        \\SVC #0
        \\HVC #0x1
        \\SMC #0o15
        \\BRK #42
        \\HLT #0x42
        \\TCANCEL #123
        \\DCPS1 #1234
        \\DCPS2 #12345
        \\DCPS3 #65535
        \\DCPS3 #0x0
        \\DCPS2 #0
        \\DCPS1
        ,
        .operands = .empty,
    };

    // Mnemonics are lowered, immediates are printed in hex (except a bare 0),
    // and a zero dcps immediate is omitted entirely.
    const expected_disasms = [_][]const u8{
        "svc #0",
        "hvc #0x1",
        "smc #0xd",
        "brk #0x2a",
        "hlt #0x42",
        "tcancel #0x7b",
        "dcps1 #0x4d2",
        "dcps2 #0x3039",
        "dcps3 #0xffff",
        "dcps3",
        "dcps2",
        "dcps1",
    };
    for (expected_disasms) |disasm|
        try std.testing.expectFmt(disasm, "{f}", .{(try as.nextInstruction()).?});

    try std.testing.expect(null == try as.nextInstruction());
}
test "extract" {
    var as: Assemble = .{
        .source =
        \\extr W0, W1, W2, #0
        \\extr W3, W3, W4, #1
        \\extr W5, W5, W5, #31
        \\
        \\extr X0, X1, X2, #0
        \\extr X3, X3, X4, #1
        \\extr X5, X5, X5, #63
        ,
        .operands = .empty,
    };

    // Upper-case register names are accepted and rendered lower-case.
    const expected_disasms = [_][]const u8{
        "extr w0, w1, w2, #0",
        "extr w3, w3, w4, #1",
        "extr w5, w5, w5, #31",
        "extr x0, x1, x2, #0",
        "extr x3, x3, x4, #1",
        "extr x5, x5, x5, #63",
    };
    for (expected_disasms) |disasm|
        try std.testing.expectFmt(disasm, "{f}", .{(try as.nextInstruction()).?});

    try std.testing.expect(null == try as.nextInstruction());
}
test "hints" {
    var as: Assemble = .{
        .source =
        \\NOP
        \\hint #0
        \\YiElD
        \\Hint #0x1
        \\WfE
        \\hInt #02
        \\wFi
        \\hiNt #0b11
        \\sEv
        \\hinT #4
        \\sevl
        \\HINT #0b101
        \\hint #0x7F
        ,
        .operands = .empty,
    };

    // Mnemonics are case-insensitive; hint immediates 0-5 print as their
    // aliases, while an unaliased immediate stays a raw `hint`.
    const expected_disasms = [_][]const u8{
        "nop",
        "nop",
        "yield",
        "yield",
        "wfe",
        "wfe",
        "wfi",
        "wfi",
        "sev",
        "sev",
        "sevl",
        "sevl",
        "hint #0x7f",
    };
    for (expected_disasms) |disasm|
        try std.testing.expectFmt(disasm, "{f}", .{(try as.nextInstruction()).?});

    try std.testing.expect(null == try as.nextInstruction());
}
+test "load store" {
+ var as: Assemble = .{
+ .source =
+ \\ LDP w0, w1, [x2], #-256
+ \\ LDP w3, w4, [x5], #0
+ \\ LDP w6, w7, [sp], #252
+ \\ LDP w0, w1, [x2, #-0x100]!
+ \\ LDP w3, w4, [x5, #0]!
+ \\ LDP w6, w7, [sp, #0xfc]!
+ \\ LDP w0, w1, [x2, #-256]
+ \\ LDP w3, w4, [x5]
+ \\ LDP w6, w7, [x8, #0]
+ \\ LDP w9, w10, [sp, #252]
+ \\
+ \\ LDP x0, x1, [x2], #-512
+ \\ LDP x3, x4, [x5], #0
+ \\ LDP x6, x7, [sp], #504
+ \\ LDP x0, x1, [x2, #-0x200]!
+ \\ LDP x3, x4, [x5, #0]!
+ \\ LDP x6, x7, [sp, #0x1f8]!
+ \\ LDP x0, x1, [x2, #-512]
+ \\ LDP x3, x4, [x5]
+ \\ LDP x6, x7, [x8, #0]
+ \\ LDP x9, x10, [sp, #504]
+ \\
+ \\ LDR w0, [x1], #-256
+ \\ LDR w2, [x3], #0
+ \\ LDR w4, [sp], #255
+ \\ LDR w0, [x1, #-0x100]!
+ \\ LDR w2, [x3, #0]!
+ \\ LDR w4, [sp, #0xff]!
+ \\ LDR w0, [x1, #0]
+ \\ LDR w2, [x3]
+ \\ LDR w4, [sp, #16380]
+ \\
+ \\ LDR x0, [x1], #-256
+ \\ LDR x2, [x3], #0
+ \\ LDR x4, [sp], #255
+ \\ LDR x0, [x1, #-0x100]!
+ \\ LDR x2, [x3, #0]!
+ \\ LDR x4, [sp, #0xff]!
+ \\ LDR x0, [x1, #0]
+ \\ LDR x2, [x3]
+ \\ LDR x4, [sp, #32760]
+ \\
+ \\ STP w0, w1, [x2], #-256
+ \\ STP w3, w4, [x5], #0
+ \\ STP w6, w7, [sp], #252
+ \\ STP w0, w1, [x2, #-0x100]!
+ \\ STP w3, w4, [x5, #0]!
+ \\ STP w6, w7, [sp, #0xfc]!
+ \\ STP w0, w1, [x2, #-256]
+ \\ STP w3, w4, [x5]
+ \\ STP w6, w7, [x8, #0]
+ \\ STP w9, w10, [sp, #252]
+ \\
+ \\ STP x0, x1, [x2], #-512
+ \\ STP x3, x4, [x5], #0
+ \\ STP x6, x7, [sp], #504
+ \\ STP x0, x1, [x2, #-0x200]!
+ \\ STP x3, x4, [x5, #0]!
+ \\ STP x6, x7, [sp, #0x1f8]!
+ \\ STP x0, x1, [x2, #-512]
+ \\ STP x3, x4, [x5]
+ \\ STP x6, x7, [x8, #0]
+ \\ STP x9, x10, [sp, #504]
+ \\
+ \\ STR w0, [x1], #-256
+ \\ STR w2, [x3], #0
+ \\ STR w4, [sp], #255
+ \\ STR w0, [x1, #-0x100]!
+ \\ STR w2, [x3, #0]!
+ \\ STR w4, [sp, #0xff]!
+ \\ STR w0, [x1, #0]
+ \\ STR w2, [x3]
+ \\ STR w4, [sp, #16380]
+ \\
+ \\ STR x0, [x1], #-256
+ \\ STR x2, [x3], #0
+ \\ STR x4, [sp], #255
+ \\ STR x0, [x1, #-0x100]!
+ \\ STR x2, [x3, #0]!
+ \\ STR x4, [sp, #0xff]!
+ \\ STR x0, [x1, #0]
+ \\ STR x2, [x3]
+ \\ STR x4, [sp, #32760]
+ ,
+ .operands = .empty,
+ };
+
+ try std.testing.expectFmt("ldp w0, w1, [x2], #-0x100", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w3, w4, [x5], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w6, w7, [sp], #0xfc", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w0, w1, [x2, #-0x100]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w3, w4, [x5, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w6, w7, [sp, #0xfc]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w0, w1, [x2, #-0x100]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w3, w4, [x5]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w6, w7, [x8]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp w9, w10, [sp, #0xfc]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("ldp x0, x1, [x2], #-0x200", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x3, x4, [x5], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x6, x7, [sp], #0x1f8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x0, x1, [x2, #-0x200]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x3, x4, [x5, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x6, x7, [sp, #0x1f8]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x0, x1, [x2, #-0x200]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x3, x4, [x5]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x6, x7, [x8]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldp x9, x10, [sp, #0x1f8]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("ldr w0, [x1], #-0x100", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w2, [x3], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w4, [sp], #0xff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w0, [x1, #-0x100]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w2, [x3, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w4, [sp, #0xff]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w0, [x1]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w2, [x3]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr w4, [sp, #0x3ffc]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("ldr x0, [x1], #-0x100", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x2, [x3], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x4, [sp], #0xff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x0, [x1, #-0x100]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x2, [x3, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x4, [sp, #0xff]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x0, [x1]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x2, [x3]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ldr x4, [sp, #0x7ff8]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("stp w0, w1, [x2], #-0x100", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w3, w4, [x5], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w6, w7, [sp], #0xfc", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w0, w1, [x2, #-0x100]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w3, w4, [x5, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w6, w7, [sp, #0xfc]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w0, w1, [x2, #-0x100]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w3, w4, [x5]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w6, w7, [x8]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp w9, w10, [sp, #0xfc]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("stp x0, x1, [x2], #-0x200", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x3, x4, [x5], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x6, x7, [sp], #0x1f8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x0, x1, [x2, #-0x200]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x3, x4, [x5, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x6, x7, [sp, #0x1f8]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x0, x1, [x2, #-0x200]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x3, x4, [x5]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x6, x7, [x8]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("stp x9, x10, [sp, #0x1f8]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("str w0, [x1], #-0x100", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w2, [x3], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w4, [sp], #0xff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w0, [x1, #-0x100]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w2, [x3, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w4, [sp, #0xff]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w0, [x1]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w2, [x3]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str w4, [sp, #0x3ffc]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("str x0, [x1], #-0x100", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x2, [x3], #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x4, [sp], #0xff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x0, [x1, #-0x100]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x2, [x3, #0x0]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x4, [sp, #0xff]!", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x0, [x1]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x2, [x3]", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("str x4, [sp, #0x7ff8]", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expect(null == try as.nextInstruction());
+}
+test "logical" {
+ var as: Assemble = .{
+ .source =
+ \\ and w0, w0, w0
+ \\ and w1, w1, w2, lsl #0
+ \\ and w3, w4, w5, lsl #1
+ \\ and w6, w6, wzr, lsl #31
+ \\ and w7, wzr, w8, lsr #0
+ \\ and w9, wzr, wzr, lsr #30
+ \\ and wzr, w10, w11, lsr #31
+ \\ and wzr, w12, wzr, asr #0x0
+ \\ and wzr, wzr, w13, asr #0x10
+ \\ and wzr, wzr, wzr, asr #0x1f
+ \\ and w0, w0, wzr
+ \\ and w1, w2, wzr, lsl #0
+ \\ and w3, wzr, w3
+ \\ and w4, wzr, w5, lsl #0
+ \\ and w6, wzr, wzr
+ \\ and w7, wzr, wzr, lsl #0
+ \\ and wzr, w8, wzr
+ \\ and wzr, w9, wzr, lsl #0
+ \\ and wzr, wzr, w10
+ \\ and wzr, wzr, w11, lsl #0
+ \\ and wzr, wzr, wzr
+ \\ and wzr, wzr, wzr, lsl #0
+ \\
+ \\ and x0, x0, x0
+ \\ and x1, x1, x2, lsl #0
+ \\ and x3, x4, x5, lsl #1
+ \\ and x6, x6, xzr, lsl #63
+ \\ and x7, xzr, x8, lsr #0
+ \\ and x9, xzr, xzr, lsr #62
+ \\ and xzr, x10, x11, lsr #63
+ \\ and xzr, x12, xzr, asr #0x0
+ \\ and xzr, xzr, x13, asr #0x1F
+ \\ and xzr, xzr, xzr, asr #0x3f
+ \\ and x0, x0, xzr
+ \\ and x1, x2, xzr, lsl #0
+ \\ and x3, xzr, x3
+ \\ and x4, xzr, x5, lsl #0
+ \\ and x6, xzr, xzr
+ \\ and x7, xzr, xzr, lsl #0
+ \\ and xzr, x8, xzr
+ \\ and xzr, x9, xzr, lsl #0
+ \\ and xzr, xzr, x10
+ \\ and xzr, xzr, x11, lsl #0
+ \\ and xzr, xzr, xzr
+ \\ and xzr, xzr, xzr, lsl #0
+ \\
+ \\ orr w0, w0, w0
+ \\ orr w1, w1, w2, lsl #0
+ \\ orr w3, w4, w5, lsl #1
+ \\ orr w6, w6, wzr, lsl #31
+ \\ orr w7, wzr, w8, lsr #0
+ \\ orr w9, wzr, wzr, lsr #30
+ \\ orr wzr, w10, w11, lsr #31
+ \\ orr wzr, w12, wzr, asr #0x0
+ \\ orr wzr, wzr, w13, asr #0x10
+ \\ orr wzr, wzr, wzr, asr #0x1f
+ \\ orr w0, w0, wzr
+ \\ orr w1, w2, wzr, lsl #0
+ \\ orr w3, wzr, w3
+ \\ orr w4, wzr, w5, lsl #0
+ \\ orr w6, wzr, wzr
+ \\ orr w7, wzr, wzr, lsl #0
+ \\ orr wzr, w8, wzr
+ \\ orr wzr, w9, wzr, lsl #0
+ \\ orr wzr, wzr, w10
+ \\ orr wzr, wzr, w11, lsl #0
+ \\ orr wzr, wzr, wzr
+ \\ orr wzr, wzr, wzr, lsl #0
+ \\
+ \\ orr x0, x0, x0
+ \\ orr x1, x1, x2, lsl #0
+ \\ orr x3, x4, x5, lsl #1
+ \\ orr x6, x6, xzr, lsl #63
+ \\ orr x7, xzr, x8, lsr #0
+ \\ orr x9, xzr, xzr, lsr #62
+ \\ orr xzr, x10, x11, lsr #63
+ \\ orr xzr, x12, xzr, asr #0x0
+ \\ orr xzr, xzr, x13, asr #0x1F
+ \\ orr xzr, xzr, xzr, asr #0x3f
+ \\ orr x0, x0, xzr
+ \\ orr x1, x2, xzr, lsl #0
+ \\ orr x3, xzr, x3
+ \\ orr x4, xzr, x5, lsl #0
+ \\ orr x6, xzr, xzr
+ \\ orr x7, xzr, xzr, lsl #0
+ \\ orr xzr, x8, xzr
+ \\ orr xzr, x9, xzr, lsl #0
+ \\ orr xzr, xzr, x10
+ \\ orr xzr, xzr, x11, lsl #0
+ \\ orr xzr, xzr, xzr
+ \\ orr xzr, xzr, xzr, lsl #0
+ \\
+ \\ eor w0, w0, w0
+ \\ eor w1, w1, w2, lsl #0
+ \\ eor w3, w4, w5, lsl #1
+ \\ eor w6, w6, wzr, lsl #31
+ \\ eor w7, wzr, w8, lsr #0
+ \\ eor w9, wzr, wzr, lsr #30
+ \\ eor wzr, w10, w11, lsr #31
+ \\ eor wzr, w12, wzr, asr #0x0
+ \\ eor wzr, wzr, w13, asr #0x10
+ \\ eor wzr, wzr, wzr, asr #0x1f
+ \\ eor w0, w0, wzr
+ \\ eor w1, w2, wzr, lsl #0
+ \\ eor w3, wzr, w3
+ \\ eor w4, wzr, w5, lsl #0
+ \\ eor w6, wzr, wzr
+ \\ eor w7, wzr, wzr, lsl #0
+ \\ eor wzr, w8, wzr
+ \\ eor wzr, w9, wzr, lsl #0
+ \\ eor wzr, wzr, w10
+ \\ eor wzr, wzr, w11, lsl #0
+ \\ eor wzr, wzr, wzr
+ \\ eor wzr, wzr, wzr, lsl #0
+ \\
+ \\ eor x0, x0, x0
+ \\ eor x1, x1, x2, lsl #0
+ \\ eor x3, x4, x5, lsl #1
+ \\ eor x6, x6, xzr, lsl #63
+ \\ eor x7, xzr, x8, lsr #0
+ \\ eor x9, xzr, xzr, lsr #62
+ \\ eor xzr, x10, x11, lsr #63
+ \\ eor xzr, x12, xzr, asr #0x0
+ \\ eor xzr, xzr, x13, asr #0x1F
+ \\ eor xzr, xzr, xzr, asr #0x3f
+ \\ eor x0, x0, xzr
+ \\ eor x1, x2, xzr, lsl #0
+ \\ eor x3, xzr, x3
+ \\ eor x4, xzr, x5, lsl #0
+ \\ eor x6, xzr, xzr
+ \\ eor x7, xzr, xzr, lsl #0
+ \\ eor xzr, x8, xzr
+ \\ eor xzr, x9, xzr, lsl #0
+ \\ eor xzr, xzr, x10
+ \\ eor xzr, xzr, x11, lsl #0
+ \\ eor xzr, xzr, xzr
+ \\ eor xzr, xzr, xzr, lsl #0
+ \\
+ \\ ands w0, w0, w0
+ \\ ands w1, w1, w2, lsl #0
+ \\ ands w3, w4, w5, lsl #1
+ \\ ands w6, w6, wzr, lsl #31
+ \\ ands w7, wzr, w8, lsr #0
+ \\ ands w9, wzr, wzr, lsr #30
+ \\ ands wzr, w10, w11, lsr #31
+ \\ ands wzr, w12, wzr, asr #0x0
+ \\ ands wzr, wzr, w13, asr #0x10
+ \\ ands wzr, wzr, wzr, asr #0x1f
+ \\ ands w0, w0, wzr
+ \\ ands w1, w2, wzr, lsl #0
+ \\ ands w3, wzr, w3
+ \\ ands w4, wzr, w5, lsl #0
+ \\ ands w6, wzr, wzr
+ \\ ands w7, wzr, wzr, lsl #0
+ \\ ands wzr, w8, wzr
+ \\ ands wzr, w9, wzr, lsl #0
+ \\ ands wzr, wzr, w10
+ \\ ands wzr, wzr, w11, lsl #0
+ \\ ands wzr, wzr, wzr
+ \\ ands wzr, wzr, wzr, lsl #0
+ \\
+ \\ ands x0, x0, x0
+ \\ ands x1, x1, x2, lsl #0
+ \\ ands x3, x4, x5, lsl #1
+ \\ ands x6, x6, xzr, lsl #63
+ \\ ands x7, xzr, x8, lsr #0
+ \\ ands x9, xzr, xzr, lsr #62
+ \\ ands xzr, x10, x11, lsr #63
+ \\ ands xzr, x12, xzr, asr #0x0
+ \\ ands xzr, xzr, x13, asr #0x1F
+ \\ ands xzr, xzr, xzr, asr #0x3f
+ \\ ands x0, x0, xzr
+ \\ ands x1, x2, xzr, lsl #0
+ \\ ands x3, xzr, x3
+ \\ ands x4, xzr, x5, lsl #0
+ \\ ands x6, xzr, xzr
+ \\ ands x7, xzr, xzr, lsl #0
+ \\ ands xzr, x8, xzr
+ \\ ands xzr, x9, xzr, lsl #0
+ \\ ands xzr, xzr, x10
+ \\ ands xzr, xzr, x11, lsl #0
+ \\ ands xzr, xzr, xzr
+ \\ ands xzr, xzr, xzr, lsl #0
+ ,
+ .operands = .empty,
+ };
+
+ try std.testing.expectFmt("and w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w3, wzr, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w4, wzr, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w6, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w7, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("and x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x3, xzr, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x4, xzr, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x6, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x7, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("orr w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w3, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w4, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w6, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w7, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("orr x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x4, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x6, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x7, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("eor w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w3, wzr, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w4, wzr, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w6, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w7, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("eor x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x3, xzr, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x4, xzr, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x6, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x7, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("ands w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w3, wzr, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w4, wzr, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w6, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w7, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("ands x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x3, xzr, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x4, xzr, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x6, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x7, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expect(null == try as.nextInstruction());
+}
+// Verifies disassembly of the MOV family: the ADD-#0 alias for SP moves,
+// the ORR-with-zero-register alias for register moves, and the
+// MOVZ/MOVN/MOVK wide-immediate forms, including the cases where
+// MOVZ/MOVN collapse to (or stay out of) the "mov" alias.
+test "mov" {
+ var as: Assemble = .{
+ .source =
+ \\MOV W0, #0
+ \\MOV WZR, #0xffff
+ \\
+ \\MOV X0, #0
+ \\MOV XZR, #0xffff
+ \\
+ \\MOV W0, WSP
+ \\MOV WSP, W1
+ \\MOV WSP, WSP
+ \\MOV X0, SP
+ \\MOV SP, X1
+ \\MOV SP, SP
+ \\
+ \\MOV W0, W0
+ \\MOV W1, W2
+ \\MOV W3, WZR
+ \\MOV WZR, W4
+ \\MOV WZR, WZR
+ \\MOV X0, X0
+ \\MOV X1, X2
+ \\MOV X3, XZR
+ \\MOV XZR, X4
+ \\MOV XZR, XZR
+ \\
+ \\MOVK W0, #0
+ \\MOVK W1, #1, lsl #0
+ \\MOVK W2, #2, lsl #16
+ \\MOVK X3, #3
+ \\MOVK X4, #4, lsl #0x00
+ \\MOVK X5, #5, lsl #0x10
+ \\MOVK X6, #6, lsl #0x20
+ \\MOVK X7, #7, lsl #0x30
+ \\
+ \\MOVN W0, #8
+ \\MOVN W1, #9, lsl #0
+ \\MOVN W2, #10, lsl #16
+ \\MOVN X3, #11
+ \\MOVN X4, #12, lsl #0x00
+ \\MOVN X5, #13, lsl #0x10
+ \\MOVN X6, #14, lsl #0x20
+ \\MOVN X7, #15, lsl #0x30
+ \\
+ \\MOVN WZR, #0, lsl #0
+ \\MOVN WZR, #0, lsl #16
+ \\MOVN XZR, #0, lsl #0
+ \\MOVN XZR, #0, lsl #16
+ \\MOVN XZR, #0, lsl #32
+ \\MOVN XZR, #0, lsl #48
+ \\
+ \\MOVN WZR, #0xffff, lsl #0
+ \\MOVN WZR, #0xffff, lsl #16
+ \\MOVN XZR, #0xffff, lsl #0
+ \\MOVN XZR, #0xffff, lsl #16
+ \\MOVN XZR, #0xffff, lsl #32
+ \\MOVN XZR, #0xffff, lsl #48
+ \\
+ \\MOVZ W0, #16
+ \\MOVZ W1, #17, lsl #0
+ \\MOVZ W2, #18, lsl #16
+ \\MOVZ X3, #19
+ \\MOVZ X4, #20, lsl #0x00
+ \\MOVZ X5, #21, lsl #0x10
+ \\MOVZ X6, #22, lsl #0x20
+ \\MOVZ X7, #23, lsl #0x30
+ \\
+ \\MOVZ WZR, #0, lsl #0
+ \\MOVZ WZR, #0, lsl #16
+ \\MOVZ XZR, #0, lsl #0
+ \\MOVZ XZR, #0, lsl #16
+ \\MOVZ XZR, #0, lsl #32
+ \\MOVZ XZR, #0, lsl #48
+ \\
+ \\MOVZ WZR, #0xffff, lsl #0
+ \\MOVZ WZR, #0xffff, lsl #16
+ \\MOVZ XZR, #0xffff, lsl #0
+ \\MOVZ XZR, #0xffff, lsl #16
+ \\MOVZ XZR, #0xffff, lsl #32
+ \\MOVZ XZR, #0xffff, lsl #48
+ ,
+ .operands = .empty,
+ };
+
+ // MOV Rd, #imm16 — assembles as a wide move, prints via the mov alias.
+ try std.testing.expectFmt("mov w0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOV to/from the stack pointer — either side may be SP/WSP.
+ try std.testing.expectFmt("mov w0, wsp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, w1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, wsp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x0, sp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, sp", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOV register-register, including ZR as source and destination.
+ try std.testing.expectFmt("mov w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w3, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, w4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, x4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVK never aliases to mov; an "lsl #0" shift is elided in output.
+ try std.testing.expectFmt("movk w0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk w1, #0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk w2, #0x2, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x3, #0x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x4, #0x4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x5, #0x5, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x6, #0x6, lsl #32", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x7, #0x7, lsl #48", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVN with a nonzero payload prints as mov of the inverted (negative) value.
+ try std.testing.expectFmt("mov w0, #-0x9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w1, #-0xa", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w2, #-0xa0001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, #-0xc", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x4, #-0xd", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x5, #-0xd0001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x6, #-0xe00000001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x7, #-0xf000000000001", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVN #0: only the shift-elided form collapses to the mov alias.
+ try std.testing.expectFmt("mov wzr, #-0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn wzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn xzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn xzr, #0x0, lsl #32", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn xzr, #0x0, lsl #48", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVN #0xffff: the 32-bit forms stay raw; the 64-bit forms alias to mov.
+ try std.testing.expectFmt("movn wzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn wzr, #0xffff, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0x10000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0xffff0001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0xffff00000001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffffffffffff", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVZ with a nonzero payload always prints via the mov alias.
+ try std.testing.expectFmt("mov w0, #0x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w1, #0x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w2, #0x120000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, #0x13", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x4, #0x14", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x5, #0x150000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x6, #0x1600000000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x7, #0x17000000000000", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVZ #0: only the shift-elided form collapses to the mov alias.
+ try std.testing.expectFmt("mov wzr, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz wzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz xzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz xzr, #0x0, lsl #32", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz xzr, #0x0, lsl #48", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVZ #0xffff: prints as mov of the shifted value, signed for word size.
+ try std.testing.expectFmt("mov wzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, #-0x10000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff0000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff00000000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0x1000000000000", "{f}", .{(try as.nextInstruction()).?});
+
+ // All input consumed.
+ try std.testing.expect(null == try as.nextInstruction());
+}
+// Verifies the UDF (permanently undefined) encoding, and that the scanner
+// tolerates leading blank lines, tabs, and no space before the immediate.
+test "reserved" {
+ var as: Assemble = .{
+ .source = "\n\nudf #0x0\n\t\n\tudf\t#01234\n \nudf#65535",
+ .operands = .empty,
+ };
+
+ try std.testing.expectFmt("udf #0x0", "{f}", .{(try as.nextInstruction()).?});
+ // #01234 is parsed as decimal 1234 (0x4d2), not octal.
+ try std.testing.expectFmt("udf #0x4d2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("udf #0xffff", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expect(null == try as.nextInstruction());
+}
+
+const aarch64 = @import("../aarch64.zig");
+const Assemble = @This();
+const assert = std.debug.assert;
+const Instruction = aarch64.encoding.Instruction;
+const instructions = @import("instructions.zon");
+const std = @import("std");
+const log = std.log.scoped(.@"asm");
diff --git a/src/codegen/aarch64/Disassemble.zig b/src/codegen/aarch64/Disassemble.zig
new file mode 100644
index 0000000000..e3b4df93d4
--- /dev/null
+++ b/src/codegen/aarch64/Disassemble.zig
@@ -0,0 +1,905 @@
+//! Output-formatting options for the AArch64 disassembler.
+// Letter case used for mnemonics, registers, and condition codes.
+case: Case = .lower,
+// Text emitted between a mnemonic and its first operand.
+mnemonic_operands_separator: []const u8 = " ",
+// Text emitted between successive operands.
+operands_separator: []const u8 = ", ",
+// When true, prefer alias mnemonics (e.g. mov, tst, cmp, neg) over the
+// underlying encodings where the operand pattern matches.
+enable_aliases: bool = true,
+
+pub const Case = enum { lower, upper };
+
+pub fn printInstruction(dis: Disassemble, inst: Instruction, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ unallocated: switch (inst.decode()) {
+ .unallocated => break :unallocated,
+ .reserved => |reserved| switch (reserved.decode()) {
+ .unallocated => break :unallocated,
+ .udf => |udf| return writer.print("{f}{s}#0x{x}", .{
+ fmtCase(.udf, dis.case),
+ dis.mnemonic_operands_separator,
+ udf.imm16,
+ }),
+ },
+ .sme => {},
+ .sve => {},
+ .data_processing_immediate => |data_processing_immediate| switch (data_processing_immediate.decode()) {
+ .unallocated => break :unallocated,
+ .pc_relative_addressing => |pc_relative_addressing| {
+ const group = pc_relative_addressing.group;
+ const imm = (@as(i33, group.immhi) << 2 | @as(i33, group.immlo) << 0) + @as(i33, switch (group.op) {
+ .adr => Instruction.size,
+ .adrp => 0,
+ });
+ return writer.print("{f}{s}{f}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(.doubleword, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ switch (group.op) {
+ .adr => @abs(imm),
+ .adrp => @abs(imm) << 12,
+ },
+ });
+ },
+ .add_subtract_immediate => |add_subtract_immediate| {
+ const group = add_subtract_immediate.group;
+ const op = group.op;
+ const S = group.S;
+ const sf = group.sf;
+ const sh = group.sh;
+ const imm12 = group.imm12;
+ const Rn = group.Rn.decodeInteger(sf, .{ .sp = true });
+ const Rd = group.Rd.decodeInteger(sf, .{ .sp = !S });
+ const elide_shift = sh == .@"0";
+ if (dis.enable_aliases and op == .add and S == false and elide_shift and imm12 == 0 and
+ (Rn.alias == .sp or Rd.alias == .sp)) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(.mov, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{s}{f}{s}{f}{s}#0x{x}", .{
+ fmtCase(op, dis.case),
+ if (S) "s" else "",
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ imm12,
+ });
+ return if (!elide_shift) writer.print("{s}{f} #{s}", .{
+ dis.operands_separator,
+ fmtCase(.lsl, dis.case),
+ @tagName(sh),
+ });
+ },
+ .add_subtract_immediate_with_tags => {},
+ .logical_immediate => |logical_immediate| {
+ const decoded = logical_immediate.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = logical_immediate.group;
+ const sf = group.sf;
+ const decoded_imm = group.imm.decodeImmediate(sf);
+ const imm = switch (sf) {
+ .word => @as(i32, @bitCast(@as(u32, @intCast(decoded_imm)))),
+ .doubleword => @as(i64, @bitCast(decoded_imm)),
+ };
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{ .sp = decoded != .ands });
+ return if (dis.enable_aliases and decoded == .orr and Rn.alias == .zr and !group.imm.moveWidePreferred(sf)) writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
+ fmtCase(.mov, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ if (imm < 0) "-" else "",
+ @abs(imm),
+ }) else if (dis.enable_aliases and decoded == .ands and Rd.alias == .zr) writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
+ fmtCase(.tst, dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ if (imm < 0) "-" else "",
+ @abs(imm),
+ }) else writer.print("{f}{s}{f}{s}{f}{s}#0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ decoded_imm,
+ });
+ },
+ .move_wide_immediate => |move_wide_immediate| {
+ const decoded = move_wide_immediate.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = move_wide_immediate.group;
+ const sf = group.sf;
+ const hw = group.hw;
+ const imm16 = group.imm16;
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ const elide_shift = hw == .@"0";
+ if (dis.enable_aliases and switch (decoded) {
+ .unallocated => unreachable,
+ .movz => elide_shift or group.imm16 != 0,
+ .movn => (elide_shift or group.imm16 != 0) and switch (sf) {
+ .word => group.imm16 != std.math.maxInt(u16),
+ .doubleword => true,
+ },
+ .movk => false,
+ }) {
+ const decoded_imm = switch (sf) {
+ .word => @as(i32, @bitCast(@as(u32, group.imm16) << @intCast(hw.int()))),
+ .doubleword => @as(i64, @bitCast(@as(u64, group.imm16) << hw.int())),
+ };
+ const imm = switch (decoded) {
+ .unallocated => unreachable,
+ .movz => decoded_imm,
+ .movn => ~decoded_imm,
+ .movk => unreachable,
+ };
+ return writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
+ fmtCase(.mov, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ if (imm < 0) "-" else "",
+ @abs(imm),
+ });
+ }
+ try writer.print("{f}{s}{f}{s}#0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ imm16,
+ });
+ return if (!elide_shift) writer.print("{s}{f} #{s}", .{
+ dis.operands_separator,
+ fmtCase(.lsl, dis.case),
+ @tagName(hw),
+ });
+ },
+ .bitfield => |bitfield| {
+ const decoded = bitfield.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = bitfield.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}{s}#{d}{s}#{d}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.imm.immr,
+ dis.operands_separator,
+ group.imm.imms,
+ });
+ },
+ .extract => |extract| {
+ const decoded = extract.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = extract.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}{s}{f}{s}#{d}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.imms,
+ });
+ },
+ },
+ .branch_exception_generating_system => |branch_exception_generating_system| switch (branch_exception_generating_system.decode()) {
+ .unallocated => break :unallocated,
+ .conditional_branch_immediate => |conditional_branch_immediate| {
+ const decoded = conditional_branch_immediate.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = conditional_branch_immediate.group;
+ const imm = @as(i21, group.imm19);
+ return writer.print("{f}.{f}{s}.{c}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ fmtCase(group.cond, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ .exception_generating => |exception_generating| {
+ const decoded = exception_generating.decode();
+ switch (decoded) {
+ .unallocated => break :unallocated,
+ .svc, .hvc, .smc, .brk, .hlt, .tcancel => {},
+ .dcps1, .dcps2, .dcps3 => switch (exception_generating.group.imm16) {
+ 0 => return writer.print("{f}", .{fmtCase(decoded, dis.case)}),
+ else => {},
+ },
+ }
+ return switch (exception_generating.group.imm16) {
+ 0 => writer.print("{f}{s}#0", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ }),
+ else => writer.print("{f}{s}#0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ exception_generating.group.imm16,
+ }),
+ };
+ },
+ .system_register_argument => {},
+ .hints => |hints| switch (hints.decode()) {
+ .hint => |hint| return writer.print("{f}{s}#0x{x}", .{
+ fmtCase(.hint, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(u7, hint.CRm) << 3 | @as(u7, hint.op2) << 0,
+ }),
+ else => |decoded| return writer.print("{f}", .{fmtCase(decoded, dis.case)}),
+ },
+ .barriers => {},
+ .pstate => {},
+ .system_result => {},
+ .system => {},
+ .system_register_move => {},
+ .unconditional_branch_register => |unconditional_branch_register| {
+ const decoded = unconditional_branch_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = unconditional_branch_register.group;
+ const Rn = group.Rn.decodeInteger(.doubleword, .{});
+ try writer.print("{f}", .{fmtCase(decoded, dis.case)});
+ return if (decoded != .ret or Rn.alias != .r30) try writer.print("{s}{f}", .{
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ });
+ },
+ .unconditional_branch_immediate => |unconditional_branch_immediate| {
+ const group = unconditional_branch_immediate.group;
+ const imm = @as(i28, group.imm26);
+ return writer.print("{f}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ .compare_branch_immediate => |compare_branch_immediate| {
+ const group = compare_branch_immediate.group;
+ const imm = @as(i21, group.imm19);
+ return writer.print("{f}{s}{f}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(group.sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ .test_branch_immediate => |test_branch_immediate| {
+ const group = test_branch_immediate.group;
+ const imm = @as(i16, group.imm14);
+ return writer.print("{f}{s}{f}{s}#0x{d}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(@enumFromInt(group.b5), .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ @as(u6, group.b5) << 5 |
+ @as(u6, group.b40) << 0,
+ dis.operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ },
+ .load_store => |load_store| switch (load_store.decode()) {
+ .unallocated => break :unallocated,
+ .register_literal => {},
+ .memory => {},
+ .no_allocate_pair_offset => {},
+ .register_pair_post_indexed => |register_pair_post_indexed| switch (register_pair_post_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = integer.group;
+ const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
+ });
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ const vs = group.opc.decode();
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
+ });
+ },
+ },
+ .register_pair_offset => |register_pair_offset| switch (register_pair_offset.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = integer.group;
+ const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
+ try writer.print("{f}{s}{f}{s}{f}{s}[{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ });
+ if (group.imm7 != 0) try writer.print("{s}#{s}0x{x}", .{
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
+ });
+ return writer.writeByte(']');
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ const vs = group.opc.decode();
+ try writer.print("{f}{s}{f}{s}{f}{s}[{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ });
+ if (group.imm7 != 0) try writer.print("{s}#{s}0x{x}", .{
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
+ });
+ return writer.writeByte(']');
+ },
+ },
+ .register_pair_pre_indexed => |register_pair_pre_indexed| switch (register_pair_pre_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = integer.group;
+ const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
+ });
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ const vs = group.opc.decode();
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
+ });
+ },
+ },
+ .register_unscaled_immediate => {},
+ .register_immediate_post_indexed => |register_immediate_post_indexed| switch (register_immediate_post_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated => break :unallocated,
+ .strb, .ldrb, .strh, .ldrh => .word,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ return writer.print("{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm9 < 0) "-" else "",
+ @abs(group.imm9),
+ });
+ },
+ .vector => {},
+ },
+ .register_unprivileged => {},
+ .register_immediate_pre_indexed => |register_immediate_pre_indexed| switch (register_immediate_pre_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated => break :unallocated,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .strb, .ldrb, .strh, .ldrh => .word,
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ return writer.print("{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm9 < 0) "-" else "",
+ @abs(group.imm9),
+ });
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ return writer.print("{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(group.opc1.decode(group.size)).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm9 < 0) "-" else "",
+ @abs(group.imm9),
+ });
+ },
+ },
+ .register_register_offset => |register_register_offset| switch (register_register_offset.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated, .prfm => break :unallocated,
+ .strb, .ldrb, .strh, .ldrh => .word,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ try writer.print("{f}{s}{f}{s}[{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(group.option.sf(), .{}).fmtCase(dis.case),
+ });
+ if (group.option != .lsl or group.S) {
+ try writer.print("{s}{f}", .{
+ dis.operands_separator,
+ fmtCase(group.option, dis.case),
+ });
+ if (group.S) try writer.print(" #{d}", .{
+ @intFromEnum(group.size),
+ });
+ }
+ return writer.writeByte(']');
+ },
+ .vector => {},
+ },
+ .register_unsigned_immediate => |register_unsigned_immediate| switch (register_unsigned_immediate.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated, .prfm => break :unallocated,
+ .strb, .ldrb, .strh, .ldrh => .word,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ try writer.print("{f}{s}{f}{s}[{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ });
+ if (group.imm12 > 0) try writer.print("{s}#0x{x}", .{
+ dis.operands_separator,
+ @as(u15, group.imm12) << @intFromEnum(group.size),
+ });
+ return writer.writeByte(']');
+ },
+ .vector => {},
+ },
+ },
+ .data_processing_register => |data_processing_register| switch (data_processing_register.decode()) {
+ .unallocated => break :unallocated,
+ .data_processing_two_source => |data_processing_two_source| {
+ const decoded = data_processing_two_source.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = data_processing_two_source.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
+ });
+ },
+ .data_processing_one_source => |data_processing_one_source| {
+ const decoded = data_processing_one_source.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = data_processing_one_source.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ });
+ },
+ .logical_shifted_register => |logical_shifted_register| {
+ const decoded = logical_shifted_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = logical_shifted_register.group;
+ const sf = group.sf;
+ const shift = group.shift;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const amount = group.imm6;
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ const elide_shift = shift == .lsl and amount == 0;
+ if (dis.enable_aliases and switch (decoded) {
+ else => false,
+ .orr => elide_shift,
+ .orn => true,
+ } and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { mov, mvn }, switch (decoded) {
+ else => unreachable,
+ .orr => .mov,
+ .orn => .mvn,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else if (dis.enable_aliases and decoded == .ands and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(.tst, dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ return if (!elide_shift) writer.print("{s}{f} #{d}", .{
+ dis.operands_separator,
+ fmtCase(shift, dis.case),
+ amount,
+ });
+ },
+ .add_subtract_shifted_register => |add_subtract_shifted_register| {
+ const decoded = add_subtract_shifted_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = add_subtract_shifted_register.group;
+ const sf = group.sf;
+ const shift = group.shift;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const imm6 = group.imm6;
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ if (dis.enable_aliases and group.S and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cmn, cmp }, switch (group.op) {
+ .add => .cmn,
+ .sub => .cmp,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else if (dis.enable_aliases and group.op == .sub and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { neg, negs }, switch (group.S) {
+ false => .neg,
+ true => .negs,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ return if (shift != .lsl or imm6 != 0) return writer.print("{s}{f} #{d}", .{
+ dis.operands_separator,
+ fmtCase(shift, dis.case),
+ imm6,
+ });
+ },
+ .add_subtract_extended_register => |add_subtract_extended_register| {
+ const decoded = add_subtract_extended_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = add_subtract_extended_register.group;
+ const sf = group.sf;
+ const Rm = group.Rm.decodeInteger(group.option.sf(), .{});
+ const Rn = group.Rn.decodeInteger(sf, .{ .sp = true });
+ const Rd = group.Rd.decodeInteger(sf, .{ .sp = true });
+ if (dis.enable_aliases and group.S and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cmn, cmp }, switch (group.op) {
+ .add => .cmn,
+ .sub => .cmp,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ return if (group.option != @as(Instruction.DataProcessingRegister.AddSubtractExtendedRegister.Option, switch (sf) {
+ .word => .uxtw,
+ .doubleword => .uxtx,
+ }) or group.imm3 != 0) writer.print("{s}{f} #{d}", .{
+ dis.operands_separator,
+ fmtCase(group.option, dis.case),
+ group.imm3,
+ });
+ },
+ .add_subtract_with_carry => |add_subtract_with_carry| {
+ const decoded = add_subtract_with_carry.decode();
+ const group = add_subtract_with_carry.group;
+ const sf = group.sf;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ return if (dis.enable_aliases and group.op == .sbc and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { ngc, ngcs }, switch (group.S) {
+ false => .ngc,
+ true => .ngcs,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ },
+ .rotate_right_into_flags => {},
+ .evaluate_into_flags => {},
+ .conditional_compare_register => {},
+ .conditional_compare_immediate => {},
+ .conditional_select => |conditional_select| {
+ const decoded = conditional_select.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = conditional_select.group;
+ const sf = group.sf;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const cond = group.cond;
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ return if (dis.enable_aliases and group.op != group.op2 and Rm.alias == .zr and cond != .al and cond != .nv and Rn.alias == Rm.alias) writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cset, csetm }, switch (decoded) {
+ else => unreachable,
+ .csinc => .cset,
+ .csinv => .csetm,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ fmtCase(cond.invert(), dis.case),
+ }) else if (dis.enable_aliases and decoded != .csel and cond != .al and cond != .nv and Rn.alias == Rm.alias) writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cinc, cinv, cneg }, switch (decoded) {
+ else => unreachable,
+ .csinc => .cinc,
+ .csinv => .cinv,
+ .csneg => .cneg,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ fmtCase(cond.invert(), dis.case),
+ }) else writer.print("{f}{s}{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ dis.operands_separator,
+ fmtCase(cond, dis.case),
+ });
+ },
+ .data_processing_three_source => |data_processing_three_source| {
+ const decoded = data_processing_three_source.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = data_processing_three_source.group;
+ const sf = group.sf;
+ try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
+ });
+ return switch (decoded) {
+ .unallocated => unreachable,
+ .madd, .msub, .smaddl, .smsubl, .umaddl, .umsubl => writer.print("{s}{f}", .{
+ dis.operands_separator,
+ group.Ra.decodeInteger(sf, .{}).fmtCase(dis.case),
+ }),
+ .smulh, .umulh => {},
+ };
+ },
+ },
+ .data_processing_vector => {},
+ }
+ return writer.print(".{f}{s}0x{x:0>8}", .{
+ fmtCase(.word, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(Instruction.Backing, @bitCast(inst)),
+ });
+}
+
+/// Returns a tiny formatter that prints `@tagName(tag)` with every ASCII
+/// letter forced to the requested `case` (.lower or .upper). Used for
+/// mnemonics, register aliases, conditions, etc. in disassembly output.
+fn fmtCase(tag: anytype, case: Case) struct {
+ tag: []const u8,
+ case: Case,
+ pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ // Re-case one byte at a time; non-letter bytes pass through
+ // std.ascii.toLower/toUpper unchanged.
+ for (data.tag) |c| try writer.writeByte(switch (data.case) {
+ .lower => std.ascii.toLower(c),
+ .upper => std.ascii.toUpper(c),
+ });
+ }
+} {
+ return .{ .tag = @tagName(tag), .case = case };
+}
+
+/// Formats an aarch64 register for disassembly, honoring the requested
+/// letter case and the register's active view (`reg.format`).
+pub const RegisterFormatter = struct {
+ reg: aarch64.encoding.Register,
+ case: Case,
+ pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ switch (data.reg.format) {
+ // Bare alias spelling, re-cased (e.g. "sp" / "SP").
+ .alias => try writer.print("{f}", .{fmtCase(data.reg.alias, data.case)}),
+ .integer => |size| switch (data.reg.alias) {
+ // Numbered general-purpose registers: size prefix letter plus the
+ // encoded register number (e.g. "w0", "x30").
+ .r0,
+ .r1,
+ .r2,
+ .r3,
+ .r4,
+ .r5,
+ .r6,
+ .r7,
+ .r8,
+ .r9,
+ .r10,
+ .r11,
+ .r12,
+ .r13,
+ .r14,
+ .r15,
+ .r16,
+ .r17,
+ .r18,
+ .r19,
+ .r20,
+ .r21,
+ .r22,
+ .r23,
+ .r24,
+ .r25,
+ .r26,
+ .r27,
+ .r28,
+ .r29,
+ .r30,
+ => |alias| try writer.print("{c}{d}", .{
+ size.prefix(),
+ @intFromEnum(alias.encode(.{})),
+ }),
+ // Zero register keeps its alias spelling behind the size prefix
+ // ("wzr" / "xzr"), re-cased.
+ .zr => try writer.print("{c}{f}", .{
+ size.prefix(),
+ fmtCase(data.reg.alias, data.case),
+ }),
+ // Remaining integer aliases (presumably sp and friends) only take
+ // a "w" prefix in word size; doubleword uses the bare alias.
+ else => try writer.print("{s}{f}", .{
+ switch (size) {
+ .word => "w",
+ .doubleword => "",
+ },
+ fmtCase(data.reg.alias, data.case),
+ }),
+ },
+ // Scalar SIMD/FP view: size prefix plus vector register number
+ // (encoded with .V = true).
+ .scalar => |size| try writer.print("{c}{d}", .{
+ size.prefix(),
+ @intFromEnum(data.reg.alias.encode(.{ .V = true })),
+ }),
+ // Vector view: alias dot arrangement (e.g. ".16b" style suffix).
+ .vector => |arrangement| try writer.print("{f}.{f}", .{
+ fmtCase(data.reg.alias, data.case),
+ fmtCase(arrangement, data.case),
+ }),
+ // Element view: alias, element-size letter, and lane index, e.g. ".s[1]".
+ .element => |element| try writer.print("{f}.{c}[{d}]", .{
+ fmtCase(data.reg.alias, data.case),
+ element.size.prefix(),
+ element.index,
+ }),
+ }
+ }
+};
+
+const aarch64 = @import("../aarch64.zig");
+const Disassemble = @This();
+const Instruction = aarch64.encoding.Instruction;
+const std = @import("std");
diff --git a/src/codegen/aarch64/Mir.zig b/src/codegen/aarch64/Mir.zig
new file mode 100644
index 0000000000..b6598b7ea7
--- /dev/null
+++ b/src/codegen/aarch64/Mir.zig
@@ -0,0 +1,348 @@
+// Machine IR for one finished function: three instruction slices plus a
+// trailing literal pool and pending relocations. deinit() asserts the three
+// slices are adjacent sub-slices of one allocation, laid out in memory as
+// body, then prologue, then epilogue.
+prologue: []const Instruction,
+body: []const Instruction,
+epilogue: []const Instruction,
+// Constant-pool words appended after the emitted instructions.
+literals: []const u32,
+// Relocations resolved against `body` labels during emit().
+nav_relocs: []const Reloc.Nav,
+uav_relocs: []const Reloc.Uav,
+lazy_relocs: []const Reloc.Lazy,
+global_relocs: []const Reloc.Global,
+literal_relocs: []const Reloc.Literal,
+
+pub const Reloc = struct {
+ /// Index into `body` of the instruction to be relocated (see emit()).
+ label: u32,
+ /// Addend folded into the relocation; stored with u32 alignment to keep
+ /// the containing structs compact.
+ addend: u64 align(@alignOf(u32)) = 0,
+
+ /// Relocation against a named value (nav) in the InternPool.
+ pub const Nav = struct {
+ nav: InternPool.Nav.Index,
+ reloc: Reloc,
+ };
+
+ /// Relocation against an unnamed (anonymous) value.
+ pub const Uav = struct {
+ uav: InternPool.Key.Ptr.BaseAddr.Uav,
+ reloc: Reloc,
+ };
+
+ /// Relocation against a lazily-created linker symbol.
+ pub const Lazy = struct {
+ symbol: link.File.LazySymbol,
+ reloc: Reloc,
+ };
+
+ /// Relocation against an external symbol by NUL-terminated name.
+ pub const Global = struct {
+ name: [*:0]const u8,
+ reloc: Reloc,
+ };
+
+ /// Fixup of a PC-relative literal load in `body`; no addend needed.
+ pub const Literal = struct {
+ label: u32,
+ };
+};
+
+/// Frees all Mir-owned memory. The three instruction slices must be adjacent
+/// sub-slices of a single allocation (body, then prologue, then epilogue) and
+/// are freed as one buffer.
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
+ assert(mir.body.ptr + mir.body.len == mir.prologue.ptr);
+ assert(mir.prologue.ptr + mir.prologue.len == mir.epilogue.ptr);
+ gpa.free(mir.body.ptr[0 .. mir.body.len + mir.prologue.len + mir.epilogue.len]);
+ gpa.free(mir.literals);
+ gpa.free(mir.nav_relocs);
+ gpa.free(mir.uav_relocs);
+ gpa.free(mir.lazy_relocs);
+ gpa.free(mir.global_relocs);
+ gpa.free(mir.literal_relocs);
+ mir.* = undefined;
+}
+
+/// Lowers this Mir into `code` (raw bytes) and registers relocations with the
+/// linker. Code layout produced: prologue | body | epilogue | align pad |
+/// literal pool. `body` and `epilogue` are stored in reverse order and are
+/// therefore emitted backward; `prologue` is stored forward.
+pub fn emit(
+ mir: Mir,
+ lf: *link.File,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
+ func_index: InternPool.Index,
+ code: *std.ArrayListUnmanaged(u8),
+ debug_output: link.File.DebugInfoOutput,
+) !void {
+ // Debug info is not produced by this backend yet.
+ _ = debug_output;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const gpa = zcu.gpa;
+ const func = zcu.funcInfo(func_index);
+ const nav = ip.getNav(func.owner_nav);
+ const mod = zcu.navFileScope(func.owner_nav).mod.?;
+ const target = &mod.resolved_target.result;
+ mir_log.debug("{f}:", .{nav.fqn.fmt(ip)});
+
+ // Function alignment: explicit alignment wins (clamped up to the target
+ // minimum); otherwise pick by optimize mode.
+ const func_align = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
+ else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
+ };
+ const code_len = mir.prologue.len + mir.body.len + mir.epilogue.len;
+ // Number of zero-padding *instruction slots* needed so the literal pool
+ // lands on min(func_align, 16) bytes: classic align-up, computed in units
+ // of Instruction.size.
+ const literals_align_gap = -%code_len & (@divExact(
+ @as(u5, @intCast(func_align.minStrict(.@"16").toByteUnits().?)),
+ Instruction.size,
+ ) - 1);
+ try code.ensureUnusedCapacity(gpa, Instruction.size *
+ (code_len + literals_align_gap + mir.literals.len));
+ emitInstructionsForward(code, mir.prologue);
+ emitInstructionsBackward(code, mir.body);
+ // Byte offset just past the body; reloc offsets below are computed
+ // relative to this point because `body` storage is reversed: storage
+ // index `label` sits at code offset body_end - 4 * (1 + label).
+ const body_end: u32 = @intCast(code.items.len);
+ emitInstructionsBackward(code, mir.epilogue);
+ code.appendNTimesAssumeCapacity(0, Instruction.size * literals_align_gap);
+ code.appendSliceAssumeCapacity(@ptrCast(mir.literals));
+ mir_log.debug("", .{});
+
+ // Resolve each named-value reference to a linker symbol and emit the
+ // matching relocation.
+ for (mir.nav_relocs) |nav_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ switch (try @import("../../codegen.zig").genNavRef(
+ lf,
+ pt,
+ src_loc,
+ nav_reloc.nav,
+ &mod.resolved_target.result,
+ )) {
+ .sym_index => |sym_index| sym_index,
+ .fail => |em| return zcu.codegenFailMsg(func.owner_nav, em),
+ },
+ mir.body[nav_reloc.reloc.label],
+ body_end - Instruction.size * (1 + nav_reloc.reloc.label),
+ nav_reloc.reloc.addend,
+ );
+ // Anonymous values are lowered on demand, then relocated the same way.
+ for (mir.uav_relocs) |uav_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ switch (try lf.lowerUav(
+ pt,
+ uav_reloc.uav.val,
+ ZigType.fromInterned(uav_reloc.uav.orig_ty).ptrAlignment(zcu),
+ src_loc,
+ )) {
+ .sym_index => |sym_index| sym_index,
+ .fail => |em| return zcu.codegenFailMsg(func.owner_nav, em),
+ },
+ mir.body[uav_reloc.reloc.label],
+ body_end - Instruction.size * (1 + uav_reloc.reloc.label),
+ uav_reloc.reloc.addend,
+ );
+ // Lazy symbols: per-format metadata creation (ELF / Mach-O / COFF).
+ for (mir.lazy_relocs) |lazy_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ if (lf.cast(.elf)) |ef|
+ ef.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(ef, pt, lazy_reloc.symbol) catch |err|
+ return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})
+ else if (lf.cast(.macho)) |mf|
+ mf.getZigObject().?.getOrCreateMetadataForLazySymbol(mf, pt, lazy_reloc.symbol) catch |err|
+ return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})
+ else if (lf.cast(.coff)) |cf|
+ if (cf.getOrCreateAtomForLazySymbol(pt, lazy_reloc.symbol)) |atom|
+ cf.getAtom(atom).getSymbolIndex().?
+ else |err|
+ return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})
+ else
+ return zcu.codegenFail(func.owner_nav, "external symbols unimplemented for {s}", .{@tagName(lf.tag)}),
+ mir.body[lazy_reloc.reloc.label],
+ body_end - Instruction.size * (1 + lazy_reloc.reloc.label),
+ lazy_reloc.reloc.addend,
+ );
+ // External symbols referenced by name.
+ for (mir.global_relocs) |global_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ if (lf.cast(.elf)) |ef|
+ try ef.getGlobalSymbol(std.mem.span(global_reloc.name), null)
+ else if (lf.cast(.macho)) |mf|
+ try mf.getGlobalSymbol(std.mem.span(global_reloc.name), null)
+ else if (lf.cast(.coff)) |cf|
+ try cf.getGlobalSymbol(std.mem.span(global_reloc.name), "compiler_rt")
+ else
+ return zcu.codegenFail(func.owner_nav, "external symbols unimplemented for {s}", .{@tagName(lf.tag)}),
+ mir.body[global_reloc.reloc.label],
+ body_end - Instruction.size * (1 + global_reloc.reloc.label),
+ global_reloc.reloc.addend,
+ );
+ // Literal loads are fixed up in place: the pool lies past the epilogue
+ // and alignment gap, so grow each load's word-granular imm19 by that
+ // distance and rewrite the instruction bytes already in `code`.
+ const literal_reloc_offset: i19 = @intCast(mir.epilogue.len + literals_align_gap);
+ for (mir.literal_relocs) |literal_reloc| {
+ var instruction = mir.body[literal_reloc.label];
+ instruction.load_store.register_literal.group.imm19 += literal_reloc_offset;
+ instruction.write(
+ code.items[body_end - Instruction.size * (1 + literal_reloc.label) ..][0..Instruction.size],
+ );
+ }
+}
+
+/// Appends `instructions` to `code` in storage order. Capacity must already
+/// be reserved (emitInstruction uses AssumeCapacity).
+fn emitInstructionsForward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
+ for (instructions) |instruction| emitInstruction(code, instruction);
+}
+/// Appends `instructions` to `code` in reverse storage order — used for the
+/// body and epilogue slices, which Mir stores reversed (see emit()).
+fn emitInstructionsBackward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
+ var instruction_index = instructions.len;
+ while (instruction_index > 0) {
+ instruction_index -= 1;
+ emitInstruction(code, instructions[instruction_index]);
+ }
+}
+/// Logs and serializes a single instruction into `code` (capacity assumed).
+fn emitInstruction(code: *std.ArrayListUnmanaged(u8), instruction: Instruction) void {
+ mir_log.debug("  {f}", .{instruction});
+ instruction.write(code.addManyAsArrayAssumeCapacity(Instruction.size));
+}
+
+/// Registers one linker relocation for `instruction` at byte `offset` within
+/// the owner's atom. The relocation type is derived by decoding the
+/// instruction itself (adr/adrp, add, b/bl, load/store) for ELF and Mach-O.
+/// NOTE(review): object formats other than ELF/Mach-O fall through each
+/// if/else chain without emitting anything — confirm that is intended.
+fn emitReloc(
+ lf: *link.File,
+ zcu: *Zcu,
+ owner_nav: InternPool.Nav.Index,
+ sym_index: u32,
+ instruction: Instruction,
+ offset: u32,
+ addend: u64,
+) !void {
+ const gpa = zcu.gpa;
+ switch (instruction.decode()) {
+ else => unreachable,
+ // adr/adrp and the low-12-bits add that typically follows adrp.
+ .data_processing_immediate => |decoded| if (lf.cast(.elf)) |ef| {
+ const zo = ef.zigObjectPtr().?;
+ const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+ const r_type: std.elf.R_AARCH64 = switch (decoded.decode()) {
+ else => unreachable,
+ .pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
+ .adr => .ADR_PREL_LO21,
+ .adrp => .ADR_PREL_PG_HI21,
+ },
+ .add_subtract_immediate => |add_subtract_immediate| switch (add_subtract_immediate.group.op) {
+ .add => .ADD_ABS_LO12_NC,
+ .sub => unreachable,
+ },
+ };
+ try atom.addReloc(gpa, .{
+ .r_offset = offset,
+ // ELF64 r_info packs the symbol index in the high 32 bits.
+ .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
+ .r_addend = @bitCast(addend),
+ }, zo);
+ } else if (lf.cast(.macho)) |mf| {
+ const zo = mf.getZigObject().?;
+ const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+ switch (decoded.decode()) {
+ else => unreachable,
+ .pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
+ // Mach-O has no adr relocation here; only adrp (page) is used.
+ .adr => unreachable,
+ .adrp => try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .page,
+ .meta = .{
+ .pcrel = true,
+ .has_subtractor = false,
+ // length = 2 means a 4-byte (2^2) relocated field.
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ }),
+ },
+ .add_subtract_immediate => |add_subtract_immediate| switch (add_subtract_immediate.group.op) {
+ .add => try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .pageoff,
+ .meta = .{
+ .pcrel = false,
+ .has_subtractor = false,
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ }),
+ .sub => unreachable,
+ },
+ }
+ },
+ // b / bl: 26-bit branch relocations.
+ .branch_exception_generating_system => |decoded| if (lf.cast(.elf)) |ef| {
+ const zo = ef.zigObjectPtr().?;
+ const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+ const r_type: std.elf.R_AARCH64 = switch (decoded.decode().unconditional_branch_immediate.group.op) {
+ .b => .JUMP26,
+ .bl => .CALL26,
+ };
+ try atom.addReloc(gpa, .{
+ .r_offset = offset,
+ .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
+ .r_addend = @bitCast(addend),
+ }, zo);
+ } else if (lf.cast(.macho)) |mf| {
+ const zo = mf.getZigObject().?;
+ const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+ try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .branch,
+ .meta = .{
+ .pcrel = true,
+ .has_subtractor = false,
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ });
+ },
+ // Loads/stores with unsigned immediate: pick the LDSTn type matching the
+ // access width so the low 12 bits scale correctly.
+ .load_store => |decoded| if (lf.cast(.elf)) |ef| {
+ const zo = ef.zigObjectPtr().?;
+ const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+ const r_type: std.elf.R_AARCH64 = switch (decoded.decode().register_unsigned_immediate.decode()) {
+ .integer => |integer| switch (integer.decode()) {
+ .unallocated, .prfm => unreachable,
+ .strb, .ldrb, .ldrsb => .LDST8_ABS_LO12_NC,
+ .strh, .ldrh, .ldrsh => .LDST16_ABS_LO12_NC,
+ .ldrsw => .LDST32_ABS_LO12_NC,
+ // str/ldr width depends on the sf bit of the encoding.
+ inline .str, .ldr => |encoded| switch (encoded.sf) {
+ .word => .LDST32_ABS_LO12_NC,
+ .doubleword => .LDST64_ABS_LO12_NC,
+ },
+ },
+ .vector => |vector| switch (vector.group.opc1.decode(vector.group.size)) {
+ .byte => .LDST8_ABS_LO12_NC,
+ .half => .LDST16_ABS_LO12_NC,
+ .single => .LDST32_ABS_LO12_NC,
+ .double => .LDST64_ABS_LO12_NC,
+ .quad => .LDST128_ABS_LO12_NC,
+ .scalable, .predicate => unreachable,
+ },
+ };
+ try atom.addReloc(gpa, .{
+ .r_offset = offset,
+ .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
+ .r_addend = @bitCast(addend),
+ }, zo);
+ } else if (lf.cast(.macho)) |mf| {
+ const zo = mf.getZigObject().?;
+ const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+ try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .pageoff,
+ .meta = .{
+ .pcrel = false,
+ .has_subtractor = false,
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ });
+ },
+ }
+}
+
+const Air = @import("../../Air.zig");
+const assert = std.debug.assert;
+const mir_log = std.log.scoped(.mir);
+const Instruction = @import("encoding.zig").Instruction;
+const InternPool = @import("../../InternPool.zig");
+const link = @import("../../link.zig");
+const Mir = @This();
+const std = @import("std");
+const target_util = @import("../../target.zig");
+const Zcu = @import("../../Zcu.zig");
+const ZigType = @import("../../Type.zig");
diff --git a/src/codegen/aarch64/Select.zig b/src/codegen/aarch64/Select.zig
new file mode 100644
index 0000000000..d030eab471
--- /dev/null
+++ b/src/codegen/aarch64/Select.zig
@@ -0,0 +1,12141 @@
+// Instruction selection state for one function being lowered from AIR to
+// aarch64 Mir.
+pt: Zcu.PerThread,
+target: *const std.Target,
+air: Air,
+nav_index: InternPool.Nav.Index,
+
+// Blocks
+// Definition order of AIR instructions that produce a value (set keys only).
+def_order: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, void),
+blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, Block),
+loops: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, Loop),
+// Stack of loops currently being analyzed (innermost last).
+active_loops: std.ArrayListUnmanaged(Loop.Index),
+loop_live: struct {
+ set: std.AutoArrayHashMapUnmanaged(struct { Loop.Index, Air.Inst.Index }, void),
+ list: std.ArrayListUnmanaged(Air.Inst.Index),
+},
+// Window into `dom`, a bitset (packed into DomInt words) of which enclosing
+// blocks are branch targets; see analyze()'s .loop and .br handling.
+dom_start: u32,
+dom_len: u32,
+dom: std.ArrayListUnmanaged(DomInt),
+
+// Wip Mir
+saved_registers: std.enums.EnumSet(Register.Alias),
+instructions: std.ArrayListUnmanaged(codegen.aarch64.encoding.Instruction),
+literals: std.ArrayListUnmanaged(u32),
+nav_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Nav),
+uav_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Uav),
+lazy_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Lazy),
+global_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Global),
+literal_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Literal),
+
+// Stack Frame
+returns: bool,
+// C varargs lowering state: SysV AAPCS va_list layout, or a single indirect
+// slot for other conventions.
+va_list: union(enum) {
+ other: Value.Indirect,
+ sysv: struct {
+ __stack: Value.Indirect,
+ __gr_top: Value.Indirect,
+ __vr_top: Value.Indirect,
+ __gr_offs: i32,
+ __vr_offs: i32,
+ },
+},
+stack_size: u24,
+stack_align: InternPool.Alignment,
+
+// Value Tracking
+live_registers: LiveRegisters,
+live_values: std.AutoHashMapUnmanaged(Air.Inst.Index, Value.Index),
+values: std.ArrayListUnmanaged(Value),
+
+/// Per-register map to the value currently occupying it.
+pub const LiveRegisters = std.enums.EnumArray(Register.Alias, Value.Index);
+
+/// Per-block state: the register state expected at the block's target and
+/// the label (instruction index) to branch to.
+pub const Block = struct {
+ live_registers: LiveRegisters,
+ target_label: u32,
+
+ /// Sentinel instruction index (maxInt) used as the live_values key for
+ /// the function's return value (see analyze()'s .ret_ptr handling).
+ pub const main: Air.Inst.Index = @enumFromInt(
+ std.math.maxInt(@typeInfo(Air.Inst.Index).@"enum".tag_type),
+ );
+
+ fn branch(target_block: *const Block, isel: *Select) !void {
+ // Only emit a b when not falling through. Instructions are buffered in
+ // reverse, so the byte displacement is derived from the index delta
+ // (<< 2 scales instruction count to bytes) — TODO confirm off-by-one.
+ if (isel.instructions.items.len > target_block.target_label) {
+ try isel.emit(.b(@intCast((isel.instructions.items.len + 1 - target_block.target_label) << 2)));
+ }
+ try isel.merge(&target_block.live_registers, .{});
+ }
+};
+
+/// Per-loop state collected during analyze() and used while lowering
+/// repeat branches back to the loop header.
+pub const Loop = struct {
+ def_order: u32,
+ dom: u32,
+ depth: u32,
+ live: u32,
+ live_registers: LiveRegisters,
+ // Head of an intrusive singly-linked list of repeat-branch placeholder
+ // slots, threaded through `instructions` (see branch() below).
+ repeat_list: u32,
+
+ /// Sentinel Air.Inst.Index (maxInt) meaning "no loop".
+ pub const invalid: Air.Inst.Index = @enumFromInt(
+ std.math.maxInt(@typeInfo(Air.Inst.Index).@"enum".tag_type),
+ );
+
+ pub const Index = enum(u32) {
+ _,
+
+ /// The AIR instruction that opened this loop.
+ fn inst(li: Loop.Index, isel: *Select) Air.Inst.Index {
+ return isel.loops.keys()[@intFromEnum(li)];
+ }
+
+ fn get(li: Loop.Index, isel: *Select) *Loop {
+ return &isel.loops.values()[@intFromEnum(li)];
+ }
+ };
+
+ /// List terminator for repeat_list.
+ pub const empty_list: u32 = std.math.maxInt(u32);
+
+ fn branch(target_loop: *Loop, isel: *Select) !void {
+ try isel.instructions.ensureUnusedCapacity(isel.pt.zcu.gpa, 1);
+ // Reserve an instruction slot as a placeholder and store the previous
+ // list head in it (bitcast to an Instruction); the real branch is
+ // patched in later when the loop's header position is known.
+ const repeat_list_tail = target_loop.repeat_list;
+ target_loop.repeat_list = @intCast(isel.instructions.items.len);
+ isel.instructions.appendAssumeCapacity(@bitCast(repeat_list_tail));
+ try isel.merge(&target_loop.live_registers, .{});
+ }
+};
+
+/// Frees all Select-owned containers and poisons the struct. The allocator
+/// is taken from the thread's Zcu.
+pub fn deinit(isel: *Select) void {
+ const gpa = isel.pt.zcu.gpa;
+
+ isel.def_order.deinit(gpa);
+ isel.blocks.deinit(gpa);
+ isel.loops.deinit(gpa);
+ isel.active_loops.deinit(gpa);
+ isel.loop_live.set.deinit(gpa);
+ isel.loop_live.list.deinit(gpa);
+ isel.dom.deinit(gpa);
+
+ isel.instructions.deinit(gpa);
+ isel.literals.deinit(gpa);
+ isel.nav_relocs.deinit(gpa);
+ isel.uav_relocs.deinit(gpa);
+ isel.lazy_relocs.deinit(gpa);
+ isel.global_relocs.deinit(gpa);
+ isel.literal_relocs.deinit(gpa);
+
+ isel.live_values.deinit(gpa);
+ isel.values.deinit(gpa);
+
+ isel.* = undefined;
+}
+
+pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const gpa = zcu.gpa;
+ const air_tags = isel.air.instructions.items(.tag);
+ const air_data = isel.air.instructions.items(.data);
+ var air_body_index: usize = 0;
+ var air_inst_index = air_body[air_body_index];
+ const initial_def_order_len = isel.def_order.count();
+ air_tag: switch (air_tags[@intFromEnum(air_inst_index)]) {
+ .arg,
+ .ret_addr,
+ .frame_addr,
+ .err_return_trace,
+ .save_err_return_trace_index,
+ .runtime_nav_ptr,
+ .c_va_start,
+ => {
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .add,
+ .add_safe,
+ .add_optimized,
+ .add_wrap,
+ .add_sat,
+ .sub,
+ .sub_safe,
+ .sub_optimized,
+ .sub_wrap,
+ .sub_sat,
+ .mul,
+ .mul_safe,
+ .mul_optimized,
+ .mul_wrap,
+ .mul_sat,
+ .div_float,
+ .div_float_optimized,
+ .div_trunc,
+ .div_trunc_optimized,
+ .div_floor,
+ .div_floor_optimized,
+ .div_exact,
+ .div_exact_optimized,
+ .rem,
+ .rem_optimized,
+ .mod,
+ .mod_optimized,
+ .max,
+ .min,
+ .bit_and,
+ .bit_or,
+ .shr,
+ .shr_exact,
+ .shl,
+ .shl_exact,
+ .shl_sat,
+ .xor,
+ .cmp_lt,
+ .cmp_lt_optimized,
+ .cmp_lte,
+ .cmp_lte_optimized,
+ .cmp_eq,
+ .cmp_eq_optimized,
+ .cmp_gte,
+ .cmp_gte_optimized,
+ .cmp_gt,
+ .cmp_gt_optimized,
+ .cmp_neq,
+ .cmp_neq_optimized,
+ .bool_and,
+ .bool_or,
+ .array_elem_val,
+ .slice_elem_val,
+ .ptr_elem_val,
+ => {
+ const bin_op = air_data[@intFromEnum(air_inst_index)].bin_op;
+
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .ptr_add,
+ .ptr_sub,
+ .add_with_overflow,
+ .sub_with_overflow,
+ .mul_with_overflow,
+ .shl_with_overflow,
+ .slice,
+ .slice_elem_ptr,
+ .ptr_elem_ptr,
+ => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .alloc => {
+ const ty = air_data[@intFromEnum(air_inst_index)].ty;
+
+ isel.stack_align = isel.stack_align.maxStrict(ty.ptrAlignment(zcu));
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .inferred_alloc,
+ .inferred_alloc_comptime,
+ .wasm_memory_size,
+ .wasm_memory_grow,
+ .work_item_id,
+ .work_group_size,
+ .work_group_id,
+ => unreachable,
+ .ret_ptr => {
+ const ty = air_data[@intFromEnum(air_inst_index)].ty;
+
+ if (isel.live_values.get(Block.main)) |ret_vi| switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => isel.stack_align = isel.stack_align.maxStrict(ty.ptrAlignment(zcu)),
+ .value, .constant => unreachable,
+ .address => |address_vi| try isel.live_values.putNoClobber(gpa, air_inst_index, address_vi.ref(isel)),
+ };
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .assembly => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.Asm, ty_pl.payload);
+ const operands: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0 .. extra.data.flags.outputs_len + extra.data.inputs_len]);
+
+ for (operands) |operand| if (operand != .none) try isel.analyzeUse(operand);
+ if (ty_pl.ty != .void_type) try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .not,
+ .clz,
+ .ctz,
+ .popcount,
+ .byte_swap,
+ .bit_reverse,
+ .abs,
+ .load,
+ .fptrunc,
+ .fpext,
+ .intcast,
+ .intcast_safe,
+ .trunc,
+ .optional_payload,
+ .optional_payload_ptr,
+ .optional_payload_ptr_set,
+ .wrap_optional,
+ .unwrap_errunion_payload,
+ .unwrap_errunion_err,
+ .unwrap_errunion_payload_ptr,
+ .unwrap_errunion_err_ptr,
+ .errunion_payload_ptr_set,
+ .wrap_errunion_payload,
+ .wrap_errunion_err,
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ .get_union_tag,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
+ .array_to_slice,
+ .int_from_float,
+ .int_from_float_optimized,
+ .int_from_float_safe,
+ .int_from_float_optimized_safe,
+ .float_from_int,
+ .splat,
+ .error_set_has_value,
+ .addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
+ => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .bitcast => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+ maybe_noop: {
+ if (ty_op.ty.toInterned().? != isel.air.typeOf(ty_op.operand, ip).toIntern()) break :maybe_noop;
+ if (true) break :maybe_noop;
+ if (ty_op.operand.toIndex()) |src_air_inst_index| {
+ if (isel.hints.get(src_air_inst_index)) |hint_vpsi| {
+ try isel.hints.putNoClobber(gpa, air_inst_index, hint_vpsi);
+ }
+ }
+ }
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ inline .block, .dbg_inline_block => |air_tag| {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(switch (air_tag) {
+ else => comptime unreachable,
+ .block => Air.Block,
+ .dbg_inline_block => Air.DbgInlineBlock,
+ }, ty_pl.payload);
+ const result_ty = ty_pl.ty.toInterned().?;
+
+ if (result_ty == .noreturn_type) {
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+
+ air_body_index += 1;
+ break :air_tag;
+ }
+
+ assert(!(try isel.blocks.getOrPut(gpa, air_inst_index)).found_existing);
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ const block_entry = isel.blocks.pop().?;
+ assert(block_entry.key == air_inst_index);
+
+ if (result_ty != .void_type) try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .loop => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.Block, ty_pl.payload);
+
+ const initial_dom_start = isel.dom_start;
+ const initial_dom_len = isel.dom_len;
+ isel.dom_start = @intCast(isel.dom.items.len);
+ isel.dom_len = @intCast(isel.blocks.count());
+ try isel.active_loops.append(gpa, @enumFromInt(isel.loops.count()));
+ try isel.loops.putNoClobber(gpa, air_inst_index, .{
+ .def_order = @intCast(isel.def_order.count()),
+ .dom = isel.dom_start,
+ .depth = isel.dom_len,
+ .live = 0,
+ .live_registers = undefined,
+ .repeat_list = undefined,
+ });
+ try isel.dom.appendNTimes(gpa, 0, std.math.divCeil(usize, isel.dom_len, @bitSizeOf(DomInt)) catch unreachable);
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ for (
+ isel.dom.items[initial_dom_start..].ptr,
+ isel.dom.items[isel.dom_start..][0 .. std.math.divCeil(usize, initial_dom_len, @bitSizeOf(DomInt)) catch unreachable],
+ ) |*initial_dom, loop_dom| initial_dom.* |= loop_dom;
+ isel.dom_start = initial_dom_start;
+ isel.dom_len = initial_dom_len;
+ assert(isel.active_loops.pop().?.inst(isel) == air_inst_index);
+
+ air_body_index += 1;
+ },
+ .repeat, .trap, .unreach => air_body_index += 1,
+ .br => {
+ const br = air_data[@intFromEnum(air_inst_index)].br;
+ const block_index = isel.blocks.getIndex(br.block_inst).?;
+ if (block_index < isel.dom_len) isel.dom.items[isel.dom_start + block_index / @bitSizeOf(DomInt)] |= @as(DomInt, 1) << @truncate(block_index);
+ try isel.analyzeUse(br.operand);
+
+ air_body_index += 1;
+ },
+ .breakpoint, .dbg_stmt, .dbg_empty_stmt, .dbg_var_ptr, .dbg_var_val, .dbg_arg_inline, .c_va_end => {
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .call,
+ .call_always_tail,
+ .call_never_tail,
+ .call_never_inline,
+ => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.Call, pl_op.payload);
+ const args: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0..extra.data.args_len]);
+ isel.saved_registers.insert(.lr);
+ const callee_ty = isel.air.typeOf(pl_op.operand, ip);
+ const func_info = switch (ip.indexToKey(callee_ty.toIntern())) {
+ else => unreachable,
+ .func_type => |func_type| func_type,
+ .ptr_type => |ptr_type| ip.indexToKey(ptr_type.child).func_type,
+ };
+
+ try isel.analyzeUse(pl_op.operand);
+ var param_it: CallAbiIterator = .init;
+ for (args, 0..) |arg, arg_index| {
+ const restore_values_len = isel.values.items.len;
+ defer isel.values.shrinkRetainingCapacity(restore_values_len);
+ const param_vi = param_vi: {
+ const param_ty = isel.air.typeOf(arg, ip);
+ if (arg_index >= func_info.param_types.len) {
+ assert(func_info.is_var_args);
+ switch (isel.va_list) {
+ .other => break :param_vi try param_it.nonSysvVarArg(isel, param_ty),
+ .sysv => {},
+ }
+ }
+ break :param_vi try param_it.param(isel, param_ty);
+ } orelse continue;
+ defer param_vi.deref(isel);
+ const passed_vi = switch (param_vi.parent(isel)) {
+ .unallocated, .stack_slot => param_vi,
+ .value, .constant => unreachable,
+ .address => |address_vi| address_vi,
+ };
+ switch (passed_vi.parent(isel)) {
+ .unallocated => {},
+ .stack_slot => |stack_slot| {
+ assert(stack_slot.base == .sp);
+ isel.stack_size = @max(
+ isel.stack_size,
+ stack_slot.offset + @as(u24, @intCast(passed_vi.size(isel))),
+ );
+ },
+ .value, .constant, .address => unreachable,
+ }
+
+ try isel.analyzeUse(arg);
+ }
+
+ var ret_it: CallAbiIterator = .init;
+ if (try ret_it.ret(isel, isel.air.typeOfIndex(air_inst_index, ip))) |ret_vi| {
+ tracking_log.debug("${d} <- %{d}", .{ @intFromEnum(ret_vi), @intFromEnum(air_inst_index) });
+ switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ defer address_vi.deref(isel);
+ const ret_value = ret_vi.get(isel);
+ ret_value.flags.parent_tag = .unallocated;
+ ret_value.parent_payload = .{ .unallocated = {} };
+ },
+ }
+ try isel.live_values.putNoClobber(gpa, air_inst_index, ret_vi);
+
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+ }
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .sqrt,
+ .sin,
+ .cos,
+ .tan,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ .neg,
+ .neg_optimized,
+ .is_null,
+ .is_non_null,
+ .is_null_ptr,
+ .is_non_null_ptr,
+ .is_err,
+ .is_non_err,
+ .is_err_ptr,
+ .is_non_err_ptr,
+ .is_named_enum_value,
+ .tag_name,
+ .error_name,
+ .cmp_lt_errors_len,
+ => {
+ const un_op = air_data[@intFromEnum(air_inst_index)].un_op;
+
+ try isel.analyzeUse(un_op);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .cmp_vector, .cmp_vector_optimized => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.lhs);
+ try isel.analyzeUse(extra.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .cond_br => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.CondBr, pl_op.payload);
+
+ try isel.analyzeUse(pl_op.operand);
+
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.then_body_len]));
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]));
+
+ air_body_index += 1;
+ },
+ .switch_br => {
+ const switch_br = isel.air.unwrapSwitch(air_inst_index);
+
+ try isel.analyzeUse(switch_br.operand);
+
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |case| try isel.analyze(case.body);
+ if (switch_br.else_body_len > 0) try isel.analyze(cases_it.elseBody());
+
+ air_body_index += 1;
+ },
+ .loop_switch_br => {
+ const switch_br = isel.air.unwrapSwitch(air_inst_index);
+
+ const initial_dom_start = isel.dom_start;
+ const initial_dom_len = isel.dom_len;
+ isel.dom_start = @intCast(isel.dom.items.len);
+ isel.dom_len = @intCast(isel.blocks.count());
+ try isel.active_loops.append(gpa, @enumFromInt(isel.loops.count()));
+ try isel.loops.putNoClobber(gpa, air_inst_index, .{
+ .def_order = @intCast(isel.def_order.count()),
+ .dom = isel.dom_start,
+ .depth = isel.dom_len,
+ .live = 0,
+ .live_registers = undefined,
+ .repeat_list = undefined,
+ });
+ try isel.dom.appendNTimes(gpa, 0, std.math.divCeil(usize, isel.dom_len, @bitSizeOf(DomInt)) catch unreachable);
+
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |case| try isel.analyze(case.body);
+ if (switch_br.else_body_len > 0) try isel.analyze(cases_it.elseBody());
+
+ for (
+ isel.dom.items[initial_dom_start..].ptr,
+ isel.dom.items[isel.dom_start..][0 .. std.math.divCeil(usize, initial_dom_len, @bitSizeOf(DomInt)) catch unreachable],
+ ) |*initial_dom, loop_dom| initial_dom.* |= loop_dom;
+ isel.dom_start = initial_dom_start;
+ isel.dom_len = initial_dom_len;
+ assert(isel.active_loops.pop().?.inst(isel) == air_inst_index);
+
+ air_body_index += 1;
+ },
+ .switch_dispatch => {
+ const br = air_data[@intFromEnum(air_inst_index)].br;
+
+ try isel.analyzeUse(br.operand);
+
+ air_body_index += 1;
+ },
+ .@"try", .try_cold => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.Try, pl_op.payload);
+
+ try isel.analyzeUse(pl_op.operand);
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .try_ptr, .try_ptr_cold => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.TryPtr, ty_pl.payload);
+
+ try isel.analyzeUse(extra.data.ptr);
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .ret, .ret_safe, .ret_load => {
+ const un_op = air_data[@intFromEnum(air_inst_index)].un_op;
+ isel.returns = true;
+
+ const block_index = 0;
+ assert(isel.blocks.keys()[block_index] == Block.main);
+ if (isel.dom_len > 0) isel.dom.items[isel.dom_start] |= 1 << block_index;
+
+ try isel.analyzeUse(un_op);
+
+ air_body_index += 1;
+ },
+ .store,
+ .store_safe,
+ .set_union_tag,
+ .memset,
+ .memset_safe,
+ .memcpy,
+ .memmove,
+ .atomic_store_unordered,
+ .atomic_store_monotonic,
+ .atomic_store_release,
+ .atomic_store_seq_cst,
+ => {
+ const bin_op = air_data[@intFromEnum(air_inst_index)].bin_op;
+
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .struct_field_ptr, .struct_field_val => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.struct_operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .slice_len => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ const slice_vi = try isel.use(ty_op.operand);
+ var len_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 8, 8);
+ if (try len_part_it.only(isel)) |len_part_vi|
+ try isel.live_values.putNoClobber(gpa, air_inst_index, len_part_vi.ref(isel));
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .slice_ptr => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ const slice_vi = try isel.use(ty_op.operand);
+ var ptr_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 0, 8);
+ if (try ptr_part_it.only(isel)) |ptr_part_vi|
+ try isel.live_values.putNoClobber(gpa, air_inst_index, ptr_part_vi.ref(isel));
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .reduce, .reduce_optimized => {
+ const reduce = air_data[@intFromEnum(air_inst_index)].reduce;
+
+ try isel.analyzeUse(reduce.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .shuffle_one => {
+ const extra = isel.air.unwrapShuffleOne(zcu, air_inst_index);
+
+ try isel.analyzeUse(extra.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .shuffle_two => {
+ const extra = isel.air.unwrapShuffleTwo(zcu, air_inst_index);
+
+ try isel.analyzeUse(extra.operand_a);
+ try isel.analyzeUse(extra.operand_b);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .select, .mul_add => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const bin_op = isel.air.extraData(Air.Bin, pl_op.payload).data;
+
+ try isel.analyzeUse(pl_op.operand);
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .cmpxchg_weak, .cmpxchg_strong => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.ptr);
+ try isel.analyzeUse(extra.expected_value);
+ try isel.analyzeUse(extra.new_value);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .atomic_load => {
+ const atomic_load = air_data[@intFromEnum(air_inst_index)].atomic_load;
+
+ try isel.analyzeUse(atomic_load.ptr);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .atomic_rmw => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+
+ try isel.analyzeUse(extra.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .aggregate_init => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const elements: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[ty_pl.payload..][0..@intCast(ty_pl.ty.toType().arrayLen(zcu))]);
+
+ for (elements) |element| try isel.analyzeUse(element);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .union_init => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.UnionInit, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.init);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .prefetch => {
+ const prefetch = air_data[@intFromEnum(air_inst_index)].prefetch;
+
+ try isel.analyzeUse(prefetch.ptr);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .field_parent_ptr => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.field_ptr);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .set_err_return_trace => {
+ const un_op = air_data[@intFromEnum(air_inst_index)].un_op;
+
+ try isel.analyzeUse(un_op);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .vector_store_elem => {
+ const vector_store_elem = air_data[@intFromEnum(air_inst_index)].vector_store_elem;
+ const bin_op = isel.air.extraData(Air.Bin, vector_store_elem.payload).data;
+
+ try isel.analyzeUse(vector_store_elem.vector_ptr);
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ }
+ assert(air_body_index == air_body.len);
+ isel.def_order.shrinkRetainingCapacity(initial_def_order_len);
+}
+
/// Records a use of `air_ref` during the analysis pass, updating loop-liveness
/// bookkeeping for any active loops the referenced definition predates.
///
/// Interned constants carry no instruction index and need no tracking, so they
/// return immediately. Otherwise the definition must already have been entered
/// into `def_order` by the analysis pass (asserted via `.?`).
///
/// `active_loops` is a stack of loops currently being analyzed, innermost last.
/// Each loop records the `def_order` length captured when the loop was entered,
/// so `def_order_index < loop.def_order` means the value was defined *before*
/// that loop began.
fn analyzeUse(isel: *Select, air_ref: Air.Inst.Ref) !void {
    const air_inst_index = air_ref.toIndex() orelse return;
    const def_order_index = isel.def_order.getIndex(air_inst_index).?;

    // Loop liveness
    // Scan the active-loop stack from innermost to outermost, stopping at the
    // first loop whose `def_order` snapshot the definition does not precede
    // (i.e. the innermost loop the definition lives inside of). After the
    // loop, `active_loop_index` is the outermost active loop that started
    // *after* this value was defined, if any.
    var active_loop_index = isel.active_loops.items.len;
    while (active_loop_index > 0) {
        const prev_active_loop_index = active_loop_index - 1;
        const active_loop = isel.active_loops.items[prev_active_loop_index];
        if (def_order_index >= active_loop.get(isel).def_order) break;
        active_loop_index = prev_active_loop_index;
    }
    // A use inside a loop of a value defined before that loop means the value
    // must stay live across the loop. Record the (loop, instruction) pair in
    // the `loop_live` set; the loop's `live` counter is bumped only the first
    // time the pair is seen. These per-loop counts are later converted into
    // list offsets by `finishAnalysis`.
    if (active_loop_index < isel.active_loops.items.len) {
        const active_loop = isel.active_loops.items[active_loop_index];
        const loop_live_gop =
            try isel.loop_live.set.getOrPut(isel.pt.zcu.gpa, .{ active_loop, air_inst_index });
        if (!loop_live_gop.found_existing) active_loop.get(isel).live += 1;
    }
}
+
/// Finalizes the loop-liveness data gathered by `analyzeUse`: converts each
/// loop's `live` *count* into a *start offset* into `loop_live.list`, and
/// fills that list so every loop's live instructions occupy one contiguous
/// slice (a counting-sort style placement). A `Loop.invalid` sentinel entry
/// is appended whose `live` field holds the total length, so the end of the
/// last real loop's slice can be read uniformly from the following entry.
pub fn finishAnalysis(isel: *Select) !void {
    const gpa = isel.pt.zcu.gpa;

    // Loop Liveness
    if (isel.loops.count() > 0) {
        // Reserve space for the `Loop.invalid` sentinel appended below so the
        // later `getOrPutAssumeCapacity` cannot fail.
        try isel.loops.ensureUnusedCapacity(gpa, 1);

        const loop_live_len: u32 = @intCast(isel.loop_live.set.count());
        if (loop_live_len > 0) {
            try isel.loop_live.list.resize(gpa, loop_live_len);

            // In-place inclusive prefix sum: afterwards `loops[i].live` is the
            // *end* offset of loop i's slice in `loop_live.list`, and the last
            // loop's end must equal the total number of entries.
            const loops = isel.loops.values();
            for (loops[1..], loops[0 .. loops.len - 1]) |*loop, prev_loop| loop.live += prev_loop.live;
            assert(loops[loops.len - 1].live == loop_live_len);

            // Placement pass: for each recorded (loop, instruction) pair,
            // decrement the loop's offset and store the instruction there.
            // Each decrement walks the offset from the slice's end toward its
            // start, so when this pass finishes every `loop.live` holds the
            // *start* offset of that loop's slice.
            for (isel.loop_live.set.keys()) |entry| {
                const loop, const inst = entry;
                const loop_live = &loop.get(isel).live;
                loop_live.* -= 1;
                isel.loop_live.list.items[loop_live.*] = inst;
            }
            // The first loop's slice begins at the start of the list.
            assert(loops[0].live == 0);
        }

        // Sentinel: `Loop.invalid` terminates the offset table with the total
        // length (0 when no liveness entries were recorded).
        const invalid_gop = isel.loops.getOrPutAssumeCapacity(Loop.invalid);
        assert(!invalid_gop.found_existing);
        invalid_gop.value_ptr.live = loop_live_len;
    }
}
+
+pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory, CodegenFail }!void {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const gpa = zcu.gpa;
+
+ {
+ var live_reg_it = isel.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
+ _ => {
+ const ra = &live_reg_entry.value.get(isel).location_payload.small.register;
+ assert(ra.* == live_reg_entry.key);
+ ra.* = .zr;
+ live_reg_entry.value.* = .free;
+ },
+ .allocating => live_reg_entry.value.* = .free,
+ .free => {},
+ };
+ }
+
+ var air: struct {
+ isel: *Select,
+ tag_items: []const Air.Inst.Tag,
+ data_items: []const Air.Inst.Data,
+ body: []const Air.Inst.Index,
+ body_index: u32,
+ inst_index: Air.Inst.Index,
+
+ fn tag(it: *@This(), inst_index: Air.Inst.Index) Air.Inst.Tag {
+ return it.tag_items[@intFromEnum(inst_index)];
+ }
+
+ fn data(it: *@This(), inst_index: Air.Inst.Index) Air.Inst.Data {
+ return it.data_items[@intFromEnum(inst_index)];
+ }
+
+ fn next(it: *@This()) ?Air.Inst.Tag {
+ if (it.body_index == 0) {
+ @branchHint(.unlikely);
+ return null;
+ }
+ it.body_index -= 1;
+ it.inst_index = it.body[it.body_index];
+ wip_mir_log.debug("{f}", .{it.fmtAir(it.inst_index)});
+ return it.tag(it.inst_index);
+ }
+
+ fn fmtAir(it: @This(), inst: Air.Inst.Index) struct {
+ isel: *Select,
+ inst: Air.Inst.Index,
+ pub fn format(fmt_air: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ fmt_air.isel.air.writeInst(writer, fmt_air.inst, fmt_air.isel.pt, null);
+ }
+ } {
+ return .{ .isel = it.isel, .inst = inst };
+ }
+ } = .{
+ .isel = isel,
+ .tag_items = isel.air.instructions.items(.tag),
+ .data_items = isel.air.instructions.items(.data),
+ .body = air_body,
+ .body_index = @intCast(air_body.len),
+ .inst_index = undefined,
+ };
+ air_tag: switch (air.next().?) {
+ else => |air_tag| return isel.fail("unimplemented {s}", .{@tagName(air_tag)}),
+ .arg => {
+ const arg_vi = isel.live_values.fetchRemove(air.inst_index).?.value;
+ defer arg_vi.deref(isel);
+ switch (arg_vi.parent(isel)) {
+ .unallocated, .stack_slot => if (arg_vi.hint(isel)) |arg_ra| {
+ try arg_vi.defLiveIn(isel, arg_ra, comptime &.initFill(.free));
+ } else {
+ var arg_part_it = arg_vi.parts(isel);
+ while (arg_part_it.next()) |arg_part| {
+ try arg_part.defLiveIn(isel, arg_part.hint(isel).?, comptime &.initFill(.free));
+ }
+ },
+ .value, .constant => unreachable,
+ .address => |address_vi| try address_vi.defLiveIn(isel, address_vi.hint(isel).?, comptime &.initFill(.free)),
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .add, .add_safe, .add_optimized, .add_wrap, .sub, .sub_safe, .sub_optimized, .sub_wrap => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) try res_vi.value.addOrSubtract(isel, ty, try isel.use(bin_op.lhs), switch (air_tag) {
+ else => unreachable,
+ .add, .add_safe, .add_wrap => .add,
+ .sub, .sub_safe, .sub_wrap => .sub,
+ }, try isel.use(bin_op.rhs), .{
+ .overflow = switch (air_tag) {
+ else => unreachable,
+ .add, .sub => .@"unreachable",
+ .add_safe, .sub_safe => .{ .panic = .integer_overflow },
+ .add_wrap, .sub_wrap => .wrap,
+ },
+ }) else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => .fadd(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ .sub, .sub_optimized => .fsub(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ },
+ 32 => switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => .fadd(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ .sub, .sub_optimized => .fsub(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ },
+ 64 => switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => .fadd(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ .sub, .sub_optimized => .fsub(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ },
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => switch (bits) {
+ else => unreachable,
+ 16 => "__addhf3",
+ 32 => "__addsf3",
+ 64 => "__adddf3",
+ 80 => "__addxf3",
+ 128 => "__addtf3",
+ },
+ .sub, .sub_optimized => switch (bits) {
+ else => unreachable,
+ 16 => "__subhf3",
+ 32 => "__subsf3",
+ 64 => "__subdf3",
+ 80 => "__subxf3",
+ 128 => "__subtf3",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .add_sat, .sub_sat => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 32, 64 => |bits| switch (int_info.signedness) {
+ .signed => return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ .unsigned => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const unsat_res_ra = try isel.allocIntReg();
+ defer isel.freeReg(unsat_res_ra);
+ switch (air_tag) {
+ else => unreachable,
+ .add_sat => switch (bits) {
+ else => unreachable,
+ 32 => {
+ try isel.emit(.csinv(res_ra.w(), unsat_res_ra.w(), .wzr, .invert(.cs)));
+ try isel.emit(.adds(unsat_res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ 64 => {
+ try isel.emit(.csinv(res_ra.x(), unsat_res_ra.x(), .xzr, .invert(.cs)));
+ try isel.emit(.adds(unsat_res_ra.x(), lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ },
+ },
+ .sub_sat => switch (bits) {
+ else => unreachable,
+ 32 => {
+ try isel.emit(.csel(res_ra.w(), unsat_res_ra.w(), .wzr, .invert(.cc)));
+ try isel.emit(.subs(unsat_res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ 64 => {
+ try isel.emit(.csel(res_ra.x(), unsat_res_ra.x(), .xzr, .invert(.cc)));
+ try isel.emit(.subs(unsat_res_ra.x(), lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ },
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .mul, .mul_optimized, .mul_wrap => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (int_info.signedness) {
+ .signed => switch (air_tag) {
+ else => unreachable,
+ .mul => break :unused try isel.emit(.orr(res_ra.w(), .wzr, .{ .register = .wzr })),
+ .mul_wrap => {},
+ },
+ .unsigned => {},
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.@"and"(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 2...32 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (air_tag) {
+ else => unreachable,
+ .mul => {},
+ .mul_wrap => switch (bits) {
+ else => unreachable,
+ 1...31 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 32 => {},
+ },
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.madd(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), .wzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 33...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (air_tag) {
+ else => unreachable,
+ .mul => {},
+ .mul_wrap => switch (bits) {
+ else => unreachable,
+ 33...63 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 64 => {},
+ },
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.madd(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ const res_hi64_ra = try res_hi64_vi.?.defReg(isel);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ const res_lo64_ra = try res_lo64_vi.?.defReg(isel);
+ if (res_hi64_ra == null and res_lo64_ra == null) break :unused;
+ if (res_hi64_ra) |res_ra| switch (air_tag) {
+ else => unreachable,
+ .mul => {},
+ .mul_wrap => switch (bits) {
+ else => unreachable,
+ 65...127 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 128 => {},
+ },
+ };
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_lo64_mat, const rhs_lo64_mat = lo64_mat: {
+ const res_hi64_lock: RegLock = if (res_hi64_ra != null and res_lo64_ra != null)
+ isel.lockReg(res_hi64_ra.?)
+ else
+ .empty;
+ defer res_hi64_lock.unlock(isel);
+
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ const rhs_lo64_mat = try rhs_lo64_vi.?.matReg(isel);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ const lhs_lo64_mat = try lhs_lo64_vi.?.matReg(isel);
+ break :lo64_mat .{ lhs_lo64_mat, rhs_lo64_mat };
+ };
+ if (res_lo64_ra) |res_ra| try isel.emit(.madd(res_ra.x(), lhs_lo64_mat.ra.x(), rhs_lo64_mat.ra.x(), .xzr));
+ if (res_hi64_ra) |res_ra| {
+ var rhs_hi64_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi64_vi = try rhs_hi64_it.only(isel);
+ const rhs_hi64_mat = try rhs_hi64_vi.?.matReg(isel);
+ var lhs_hi64_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi64_vi = try lhs_hi64_it.only(isel);
+ const lhs_hi64_mat = try lhs_hi64_vi.?.matReg(isel);
+ const acc_ra = try isel.allocIntReg();
+ defer isel.freeReg(acc_ra);
+ try isel.emit(.madd(res_ra.x(), lhs_hi64_mat.ra.x(), rhs_lo64_mat.ra.x(), acc_ra.x()));
+ try isel.emit(.madd(acc_ra.x(), lhs_lo64_mat.ra.x(), rhs_hi64_mat.ra.x(), acc_ra.x()));
+ try isel.emit(.umulh(acc_ra.x(), lhs_lo64_mat.ra.x(), rhs_lo64_mat.ra.x()));
+ try rhs_hi64_mat.finish(isel);
+ try lhs_hi64_mat.finish(isel);
+ }
+ try rhs_lo64_mat.finish(isel);
+ try lhs_lo64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fmul(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ 32 => .fmul(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ 64 => .fmul(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__mulhf3",
+ 32 => "__mulsf3",
+ 64 => "__muldf3",
+ 80 => "__mulxf3",
+ 128 => "__multf3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .mul_safe => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.signedness) {
+ .signed => switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.orr(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.@"b."(
+ .invert(.ne),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.ands(.wzr, lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ },
+ .unsigned => switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.@"and"(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 2...16 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.@"b."(
+ .eq,
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.ands(.wzr, res_ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = @intCast(32 - bits - 1),
+ } }));
+ try isel.emit(.madd(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), .wzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 17...32 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.@"b."(
+ .eq,
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.ands(.xzr, res_ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = @intCast(64 - bits - 1),
+ } }));
+ try isel.emit(.umaddl(res_ra.x(), lhs_mat.ra.w(), rhs_mat.ra.w(), .xzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 33...63 => |bits| {
+ const lo64_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const hi64_ra = hi64_ra: {
+ const lo64_lock = isel.tryLockReg(lo64_ra);
+ defer lo64_lock.unlock(isel);
+ break :hi64_ra try isel.allocIntReg();
+ };
+ defer isel.freeReg(hi64_ra);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.cbz(
+ hi64_ra.x(),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.orr(hi64_ra.x(), hi64_ra.x(), .{ .shifted_register = .{
+ .register = lo64_ra.x(),
+ .shift = .{ .lsr = @intCast(bits) },
+ } }));
+ try isel.emit(.madd(lo64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try isel.emit(.umulh(hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.madd(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ const hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi64_ra);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.cbz(
+ hi64_ra.x(),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.umulh(hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .mul_sat => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.orr(res_ra.w(), .wzr, .{ .register = .wzr })),
+ .unsigned => {
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.@"and"(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ }
+ },
+ 2...32 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const saturated_ra = switch (int_info.signedness) {
+ .signed => try isel.allocIntReg(),
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 2...31 => try isel.allocIntReg(),
+ 32 => .zr,
+ },
+ };
+ defer if (saturated_ra != .zr) isel.freeReg(saturated_ra);
+ const unwrapped_ra = try isel.allocIntReg();
+ defer isel.freeReg(unwrapped_ra);
+ try isel.emit(switch (saturated_ra) {
+ else => .csel(res_ra.w(), unwrapped_ra.w(), saturated_ra.w(), .eq),
+ .zr => .csinv(res_ra.w(), unwrapped_ra.w(), saturated_ra.w(), .eq),
+ });
+ switch (bits) {
+ else => unreachable,
+ 2...7, 9...15, 17...31 => switch (int_info.signedness) {
+ .signed => {
+ const wrapped_ra = try isel.allocIntReg();
+ defer isel.freeReg(wrapped_ra);
+ switch (bits) {
+ else => unreachable,
+ 1...7, 9...15 => {
+ try isel.emit(.subs(.wzr, unwrapped_ra.w(), .{ .register = wrapped_ra.w() }));
+ try isel.emit(.sbfm(wrapped_ra.w(), unwrapped_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ 17...31 => {
+ try isel.emit(.subs(.xzr, unwrapped_ra.x(), .{ .register = wrapped_ra.x() }));
+ try isel.emit(.sbfm(wrapped_ra.x(), unwrapped_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ }
+ },
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 1...7, 9...15 => try isel.emit(.ands(.wzr, unwrapped_ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = @intCast(32 - bits - 1),
+ } })),
+ 17...31 => try isel.emit(.ands(.xzr, unwrapped_ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = @intCast(64 - bits - 1),
+ } })),
+ },
+ },
+ 8 => try isel.emit(.subs(.wzr, unwrapped_ra.w(), .{ .extended_register = .{
+ .register = unwrapped_ra.w(),
+ .extend = switch (int_info.signedness) {
+ .signed => .{ .sxtb = 0 },
+ .unsigned => .{ .uxtb = 0 },
+ },
+ } })),
+ 16 => try isel.emit(.subs(.wzr, unwrapped_ra.w(), .{ .extended_register = .{
+ .register = unwrapped_ra.w(),
+ .extend = switch (int_info.signedness) {
+ .signed => .{ .sxth = 0 },
+ .unsigned => .{ .uxth = 0 },
+ },
+ } })),
+ 32 => try isel.emit(.subs(.xzr, unwrapped_ra.x(), .{ .extended_register = .{
+ .register = unwrapped_ra.w(),
+ .extend = switch (int_info.signedness) {
+ .signed => .{ .sxtw = 0 },
+ .unsigned => .{ .uxtw = 0 },
+ },
+ } })),
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.eor(saturated_ra.w(), saturated_ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1 - 1),
+ } }));
+ try isel.emit(.sbfm(saturated_ra.w(), saturated_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(bits - 1),
+ .imms = @intCast(bits - 1 + 1 - 1),
+ }));
+ try isel.emit(.eor(saturated_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 2...31 => try isel.movImmediate(saturated_ra.w(), @as(u32, std.math.maxInt(u32)) >> @intCast(32 - bits)),
+ 32 => {},
+ },
+ }
+ switch (bits) {
+ else => unreachable,
+ 2...16 => try isel.emit(.madd(unwrapped_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), .wzr)),
+ 17...32 => switch (int_info.signedness) {
+ .signed => try isel.emit(.smaddl(unwrapped_ra.x(), lhs_mat.ra.w(), rhs_mat.ra.w(), .xzr)),
+ .unsigned => try isel.emit(.umaddl(unwrapped_ra.x(), lhs_mat.ra.w(), rhs_mat.ra.w(), .xzr)),
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 33...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const saturated_ra = switch (int_info.signedness) {
+ .signed => try isel.allocIntReg(),
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 33...63 => try isel.allocIntReg(),
+ 64 => .zr,
+ },
+ };
+ defer if (saturated_ra != .zr) isel.freeReg(saturated_ra);
+ const unwrapped_lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(unwrapped_lo64_ra);
+ const unwrapped_hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(unwrapped_hi64_ra);
+ try isel.emit(switch (saturated_ra) {
+ else => .csel(res_ra.x(), unwrapped_lo64_ra.x(), saturated_ra.x(), .eq),
+ .zr => .csinv(res_ra.x(), unwrapped_lo64_ra.x(), saturated_ra.x(), .eq),
+ });
+ switch (int_info.signedness) {
+ .signed => switch (bits) {
+ else => unreachable,
+ 32...63 => {
+ const wrapped_lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(wrapped_lo64_ra);
+ try isel.emit(.ccmp(
+ unwrapped_lo64_ra.x(),
+ .{ .register = wrapped_lo64_ra.x() },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .eq,
+ ));
+ try isel.emit(.subs(.xzr, unwrapped_hi64_ra.x(), .{ .shifted_register = .{
+ .register = unwrapped_lo64_ra.x(),
+ .shift = .{ .asr = 63 },
+ } }));
+ try isel.emit(.sbfm(wrapped_lo64_ra.x(), unwrapped_lo64_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ 64 => try isel.emit(.subs(.xzr, unwrapped_hi64_ra.x(), .{ .shifted_register = .{
+ .register = unwrapped_lo64_ra.x(),
+ .shift = .{ .asr = @intCast(bits - 1) },
+ } })),
+ },
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 32...63 => {
+ const overflow_ra = try isel.allocIntReg();
+ defer isel.freeReg(overflow_ra);
+ try isel.emit(.subs(.xzr, overflow_ra.x(), .{ .immediate = 0 }));
+ try isel.emit(.orr(overflow_ra.x(), unwrapped_hi64_ra.x(), .{ .shifted_register = .{
+ .register = unwrapped_lo64_ra.x(),
+ .shift = .{ .lsr = @intCast(bits) },
+ } }));
+ },
+ 64 => try isel.emit(.subs(.xzr, unwrapped_hi64_ra.x(), .{ .immediate = 0 })),
+ },
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.eor(saturated_ra.x(), saturated_ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1 - 1),
+ } }));
+ try isel.emit(.sbfm(saturated_ra.x(), saturated_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(bits - 1),
+ .imms = @intCast(bits - 1 + 1 - 1),
+ }));
+ try isel.emit(.eor(saturated_ra.x(), lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ try isel.emit(.madd(unwrapped_lo64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try isel.emit(.smulh(unwrapped_hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ },
+ .unsigned => {
+ switch (bits) {
+ else => unreachable,
+ 32...63 => try isel.movImmediate(saturated_ra.x(), @as(u64, std.math.maxInt(u64)) >> @intCast(64 - bits)),
+ 64 => {},
+ }
+ try isel.emit(.madd(unwrapped_lo64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try isel.emit(.umulh(unwrapped_hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .div_float, .div_float_optimized => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fdiv(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ 32 => .fdiv(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ 64 => .fdiv(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__divhf3",
+ 32 => "__divsf3",
+ 64 => "__divdf3",
+ 80 => "__divxf3",
+ 128 => "__divtf3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .div_trunc, .div_trunc_optimized, .div_floor, .div_floor_optimized, .div_exact, .div_exact_optimized => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const div_ra = div_ra: switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_exact => res_ra,
+ .div_floor => switch (int_info.signedness) {
+ .signed => {
+ const div_ra = try isel.allocIntReg();
+ errdefer isel.freeReg(div_ra);
+ const rem_ra = try isel.allocIntReg();
+ defer isel.freeReg(rem_ra);
+ switch (bits) {
+ else => unreachable,
+ 1...32 => {
+ try isel.emit(.sub(res_ra.w(), div_ra.w(), .{ .register = rem_ra.w() }));
+ try isel.emit(.csinc(rem_ra.w(), .wzr, .wzr, .ge));
+ try isel.emit(.ccmp(
+ rem_ra.w(),
+ .{ .immediate = 0 },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .ne,
+ ));
+ try isel.emit(.eor(rem_ra.w(), rem_ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try isel.emit(.subs(.wzr, rem_ra.w(), .{ .immediate = 0 }));
+ try isel.emit(.msub(rem_ra.w(), div_ra.w(), rhs_mat.ra.w(), lhs_mat.ra.w()));
+ },
+ 33...64 => {
+ try isel.emit(.sub(res_ra.x(), div_ra.x(), .{ .register = rem_ra.x() }));
+ try isel.emit(.csinc(rem_ra.x(), .xzr, .xzr, .ge));
+ try isel.emit(.ccmp(
+ rem_ra.x(),
+ .{ .immediate = 0 },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .ne,
+ ));
+ try isel.emit(.eor(rem_ra.x(), rem_ra.x(), .{ .register = rhs_mat.ra.x() }));
+ try isel.emit(.subs(.xzr, rem_ra.x(), .{ .immediate = 0 }));
+ try isel.emit(.msub(rem_ra.x(), div_ra.x(), rhs_mat.ra.x(), lhs_mat.ra.x()));
+ },
+ }
+ break :div_ra div_ra;
+ },
+ .unsigned => res_ra,
+ },
+ };
+ defer if (div_ra != res_ra) isel.freeReg(div_ra);
+ try isel.emit(switch (bits) {
+ else => unreachable,
+ 1...32 => switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ .unsigned => .udiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ },
+ 33...64 => switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ .unsigned => .udiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ },
+ });
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_exact => {},
+ .div_floor => switch (int_info.signedness) {
+ .signed => return isel.fail("unimplemented {s}", .{@tagName(air_tag)}),
+ .unsigned => {},
+ },
+ }
+
+ try call.prepareReturn(isel);
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ try call.returnLiveIn(isel, res_hi64_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (int_info.signedness) {
+ .signed => "__divti3",
+ .unsigned => "__udivti3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ var rhs_hi64_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi64_vi = try rhs_hi64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi64_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi64_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi64_vi = try lhs_hi64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi64_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ try call.finishParams(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => try isel.emit(.frintz(res_ra.h(), res_ra.h())),
+ .div_floor, .div_floor_optimized => try isel.emit(.frintm(res_ra.h(), res_ra.h())),
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.emit(.fdiv(res_ra.h(), lhs_ra.h(), rhs_ra.h()));
+ },
+ 32 => {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => try isel.emit(.frintz(res_ra.s(), res_ra.s())),
+ .div_floor, .div_floor_optimized => try isel.emit(.frintm(res_ra.s(), res_ra.s())),
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.emit(.fdiv(res_ra.s(), lhs_ra.s(), rhs_ra.s()));
+ },
+ 64 => {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => try isel.emit(.frintz(res_ra.d(), res_ra.d())),
+ .div_floor, .div_floor_optimized => try isel.emit(.frintm(res_ra.d(), res_ra.d())),
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.emit(.fdiv(res_ra.d(), lhs_ra.d(), rhs_ra.d()));
+ },
+ }
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => {
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__trunch",
+ 32 => "truncf",
+ 64 => "trunc",
+ 80 => "__truncx",
+ 128 => "truncq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ },
+ .div_floor, .div_floor_optimized => {
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__floorh",
+ 32 => "floorf",
+ 64 => "floor",
+ 80 => "__floorx",
+ 128 => "floorq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ },
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__divhf3",
+ 32 => "__divsf3",
+ 64 => "__divdf3",
+ 80 => "__divxf3",
+ 128 => "__divtf3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .rem => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const div_ra = try isel.allocIntReg();
+ defer isel.freeReg(div_ra);
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...32 => {
+ try isel.emit(.msub(res_ra.w(), div_ra.w(), rhs_mat.ra.w(), lhs_mat.ra.w()));
+ try isel.emit(switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ .unsigned => .udiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ });
+ },
+ 33...64 => {
+ try isel.emit(.msub(res_ra.x(), div_ra.x(), rhs_mat.ra.x(), lhs_mat.ra.x()));
+ try isel.emit(switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ .unsigned => .udiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ });
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ } else {
+ const bits = ty.floatBits(isel.target);
+
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__fmodh",
+ 32 => "fmodf",
+ 64 => "fmod",
+ 80 => "__fmodx",
+ 128 => "fmodq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ptr_add, .ptr_sub => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ const elem_size = ty_pl.ty.toType().elemType2(zcu).abiSize(zcu);
+
+ const base_vi = try isel.use(bin_op.lhs);
+ var base_part_it = base_vi.field(ty_pl.ty.toType(), 0, 8);
+ const base_part_vi = try base_part_it.only(isel);
+ const base_part_mat = try base_part_vi.?.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(res_ra, base_part_mat.ra, switch (air_tag) {
+ else => unreachable,
+ .ptr_add => .add,
+ .ptr_sub => .sub,
+ }, elem_size, index_vi);
+ try base_part_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .max, .min => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const cond: codegen.aarch64.encoding.ConditionCode = switch (air_tag) {
+ else => unreachable,
+ .max => switch (int_info.signedness) {
+ .signed => .ge,
+ .unsigned => .hs,
+ },
+ .min => switch (int_info.signedness) {
+ .signed => .lt,
+ .unsigned => .lo,
+ },
+ };
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...32 => {
+ try isel.emit(.csel(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), cond));
+ try isel.emit(.subs(.wzr, lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ 33...64 => {
+ try isel.emit(.csel(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), cond));
+ try isel.emit(.subs(.xzr, lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else switch (air_tag) {
+ else => unreachable,
+ .max => .fmaxnm(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ .min => .fminnm(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ },
+ 32 => switch (air_tag) {
+ else => unreachable,
+ .max => .fmaxnm(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ .min => .fminnm(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ },
+ 64 => switch (air_tag) {
+ else => unreachable,
+ .max => .fmaxnm(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ .min => .fminnm(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ },
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (air_tag) {
+ else => unreachable,
+ .max => switch (bits) {
+ else => unreachable,
+ 16 => "__fmaxh",
+ 32 => "fmaxf",
+ 64 => "fmax",
+ 80 => "__fmaxx",
+ 128 => "fmaxq",
+ },
+ .min => switch (bits) {
+ else => unreachable,
+ 16 => "__fminh",
+ 32 => "fminf",
+ 64 => "fmin",
+ 80 => "__fminx",
+ 128 => "fminq",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .add_with_overflow, .sub_with_overflow => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| {
+ defer res_vi.value.deref(isel);
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const ty_size = lhs_vi.size(isel);
+ var overflow_it = res_vi.value.field(ty_pl.ty.toType(), ty_size, 1);
+ const overflow_vi = try overflow_it.only(isel);
+ var wrapped_it = res_vi.value.field(ty_pl.ty.toType(), 0, ty_size);
+ const wrapped_vi = try wrapped_it.only(isel);
+ try wrapped_vi.?.addOrSubtract(isel, ty, lhs_vi, switch (air_tag) {
+ else => unreachable,
+ .add_with_overflow => .add,
+ .sub_with_overflow => .sub,
+ }, rhs_vi, .{
+ .overflow = if (try overflow_vi.?.defReg(isel)) |overflow_ra| .{ .ra = overflow_ra } else .wrap,
+ });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .alloc, .ret_ptr => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |ptr_vi| unused: {
+ defer ptr_vi.value.deref(isel);
+ switch (air_tag) {
+ else => unreachable,
+ .alloc => {},
+ .ret_ptr => if (isel.live_values.get(Block.main)) |ret_vi| switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => break :unused,
+ },
+ }
+ const ptr_ra = try ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty = air.data(air.inst_index).ty;
+ const slot_size = ty.childType(zcu).abiSize(zcu);
+ const slot_align = ty.ptrAlignment(zcu);
+ const slot_offset = slot_align.forward(isel.stack_size);
+ isel.stack_size = @intCast(slot_offset + slot_size);
+ const lo12: u12 = @truncate(slot_offset >> 0);
+ const hi12: u12 = @intCast(slot_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ ptr_ra.x(),
+ if (lo12 > 0) ptr_ra.x() else .sp,
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0 or hi12 == 0) try isel.emit(.add(ptr_ra.x(), .sp, .{ .immediate = lo12 }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .inferred_alloc, .inferred_alloc_comptime => unreachable,
+ .assembly => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.Asm, ty_pl.payload);
+ var extra_index = extra.end;
+ const outputs: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra_index..][0..extra.data.flags.outputs_len]);
+ extra_index += outputs.len;
+ const inputs: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra_index..][0..extra.data.inputs_len]);
+ extra_index += inputs.len;
+
+ var as: codegen.aarch64.Assemble = .{
+ .source = undefined,
+ .operands = .empty,
+ };
+ defer as.operands.deinit(gpa);
+
+ for (outputs) |output| {
+ const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]), 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_index += (constraint.len + name.len + (2 + 3)) / 4;
+
+ switch (output) {
+ else => return isel.fail("invalid constraint: '{s}'", .{constraint}),
+ .none => if (std.mem.startsWith(u8, constraint, "={") and std.mem.endsWith(u8, constraint, "}")) {
+ const output_reg = Register.parse(constraint["={".len .. constraint.len - "}".len]) orelse
+ return isel.fail("invalid constraint: '{s}'", .{constraint});
+ const output_ra = output_reg.alias;
+ if (isel.live_values.fetchRemove(air.inst_index)) |output_vi| {
+ defer output_vi.value.deref(isel);
+ try output_vi.value.defLiveIn(isel, output_reg.alias, comptime &.initFill(.free));
+ isel.freeReg(output_ra);
+ }
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate output name: '{s}'", .{name});
+ operand_gop.value_ptr.* = .{ .register = switch (ty_pl.ty.toType().abiSize(zcu)) {
+ 0 => unreachable,
+ 1...4 => output_ra.w(),
+ 5...8 => output_ra.x(),
+ else => return isel.fail("too big output type: '{f}'", .{isel.fmtType(ty_pl.ty.toType())}),
+ } };
+ }
+ } else if (std.mem.eql(u8, constraint, "=r")) {
+ const output_ra = if (isel.live_values.fetchRemove(air.inst_index)) |output_vi| output_ra: {
+ defer output_vi.value.deref(isel);
+ break :output_ra try output_vi.value.defReg(isel) orelse try isel.allocIntReg();
+ } else try isel.allocIntReg();
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate output name: '{s}'", .{name});
+ operand_gop.value_ptr.* = .{ .register = switch (ty_pl.ty.toType().abiSize(zcu)) {
+ 0 => unreachable,
+ 1...4 => output_ra.w(),
+ 5...8 => output_ra.x(),
+ else => return isel.fail("too big output type: '{f}'", .{isel.fmtType(ty_pl.ty.toType())}),
+ } };
+ }
+ } else return isel.fail("invalid constraint: '{s}'", .{constraint}),
+ }
+ }
+
+ const input_mats = try gpa.alloc(Value.Materialize, inputs.len);
+ defer gpa.free(input_mats);
+ const inputs_extra_index = extra_index;
+ for (inputs, input_mats) |input, *input_mat| {
+ const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
+ const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_index += (constraint.len + name.len + (2 + 3)) / 4;
+
+ if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) {
+ const input_reg = Register.parse(constraint["{".len .. constraint.len - "}".len]) orelse
+ return isel.fail("invalid constraint: '{s}'", .{constraint});
+ input_mat.* = .{ .vi = try isel.use(input), .ra = input_reg.alias };
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate input name: '{s}'", .{name});
+ const input_ty = isel.air.typeOf(input, ip);
+ operand_gop.value_ptr.* = .{ .register = switch (input_ty.abiSize(zcu)) {
+ 0 => unreachable,
+ 1...4 => input_reg.alias.w(),
+ 5...8 => input_reg.alias.x(),
+ else => return isel.fail("too big input type: '{f}'", .{
+ isel.fmtType(isel.air.typeOf(input, ip)),
+ }),
+ } };
+ }
+ } else if (std.mem.eql(u8, constraint, "r")) {
+ const input_vi = try isel.use(input);
+ input_mat.* = try input_vi.matReg(isel);
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate input name: '{s}'", .{name});
+ operand_gop.value_ptr.* = .{ .register = switch (input_vi.size(isel)) {
+ 0 => unreachable,
+ 1...4 => input_mat.ra.w(),
+ 5...8 => input_mat.ra.x(),
+ else => return isel.fail("too big input type: '{f}'", .{
+ isel.fmtType(isel.air.typeOf(input, ip)),
+ }),
+ } };
+ }
+ } else if (std.mem.eql(u8, name, "_")) {
+ input_mat.vi = try isel.use(input);
+ } else return isel.fail("invalid constraint: '{s}'", .{constraint});
+ }
+
+ const clobbers = ip.indexToKey(extra.data.clobbers).aggregate;
+ const clobbers_ty: ZigType = .fromInterned(clobbers.ty);
+ for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
+ switch (switch (clobbers.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ }) {
+ else => unreachable,
+ .bool_false => continue,
+ .bool_true => {},
+ }
+ const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
+ if (std.mem.eql(u8, clobber_name, "memory")) continue;
+ if (std.mem.eql(u8, clobber_name, "nzcv")) continue;
+ const clobber_reg = Register.parse(clobber_name) orelse
+ return isel.fail("unable to parse clobber: '{s}'", .{clobber_name});
+ const live_vi = isel.live_registers.getPtr(clobber_reg.alias);
+ switch (live_vi.*) {
+ _ => {},
+ .allocating => return isel.fail("clobbered twice: '{s}'", .{clobber_name}),
+ .free => live_vi.* = .allocating,
+ }
+ }
+ for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
+ switch (switch (clobbers.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ }) {
+ else => unreachable,
+ .bool_false => continue,
+ .bool_true => {},
+ }
+ const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
+ if (std.mem.eql(u8, clobber_name, "memory")) continue;
+ if (std.mem.eql(u8, clobber_name, "nzcv")) continue;
+ const clobber_ra = Register.parse(clobber_name).?.alias;
+ const live_vi = isel.live_registers.getPtr(clobber_ra);
+ switch (live_vi.*) {
+ _ => {
+ if (!try isel.fill(clobber_ra))
+ return isel.fail("unable to clobber: '{s}'", .{clobber_name});
+ assert(live_vi.* == .free);
+ live_vi.* = .allocating;
+ },
+ .allocating => {},
+ .free => unreachable,
+ }
+ }
+
+ as.source = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..])[0..extra.data.source_len :0];
+ const asm_start = isel.instructions.items.len;
+ while (as.nextInstruction() catch |err| switch (err) {
+ error.InvalidSyntax => {
+ const remaining_source = std.mem.span(as.source);
+ return isel.fail("unable to assemble: '{s}'", .{std.mem.trim(
+ u8,
+ as.source[0 .. std.mem.indexOfScalar(u8, remaining_source, '\n') orelse remaining_source.len],
+ &std.ascii.whitespace,
+ )});
+ },
+ }) |instruction| try isel.emit(instruction);
+ std.mem.reverse(codegen.aarch64.encoding.Instruction, isel.instructions.items[asm_start..]);
+
+ extra_index = inputs_extra_index;
+ for (input_mats) |input_mat| {
+ const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
+ const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_index += (constraint.len + name.len + (2 + 3)) / 4;
+
+ if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) {
+ try input_mat.vi.liveOut(isel, input_mat.ra);
+ } else if (std.mem.eql(u8, constraint, "r")) {
+ try input_mat.finish(isel);
+ } else if (std.mem.eql(u8, name, "_")) {
+ try input_mat.vi.mat(isel);
+ } else unreachable;
+ }
+
+ for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
+ switch (switch (clobbers.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ }) {
+ else => unreachable,
+ .bool_false => continue,
+ .bool_true => {},
+ }
+ const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
+ if (std.mem.eql(u8, clobber_name, "memory")) continue;
+ if (std.mem.eql(u8, clobber_name, "cc")) continue;
+ isel.freeReg(Register.parse(clobber_name).?.alias);
+ }
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .bit_and, .bit_or, .xor, .bool_and, .bool_or => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ const int_info: std.builtin.Type.Int = if (ty.toIntern() == .bool_type)
+ .{ .signedness = .unsigned, .bits = 1 }
+ else if (ty.isAbiInt(zcu))
+ ty.intInfo(zcu)
+ else
+ return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ if (int_info.bits > 128) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ var offset = res_vi.value.size(isel);
+ while (offset > 0) {
+ const size = @min(offset, 8);
+ offset -= size;
+ var res_part_it = res_vi.value.field(ty, offset, size);
+ const res_part_vi = try res_part_it.only(isel);
+ const res_part_ra = try res_part_vi.?.defReg(isel) orelse continue;
+ var lhs_part_it = lhs_vi.field(ty, offset, size);
+ const lhs_part_vi = try lhs_part_it.only(isel);
+ const lhs_part_mat = try lhs_part_vi.?.matReg(isel);
+ var rhs_part_it = rhs_vi.field(ty, offset, size);
+ const rhs_part_vi = try rhs_part_it.only(isel);
+ const rhs_part_mat = try rhs_part_vi.?.matReg(isel);
+ try isel.emit(switch (air_tag) {
+ else => unreachable,
+ .bit_and, .bool_and => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .@"and"(res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ 8 => .@"and"(res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ .bit_or, .bool_or => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .orr(res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ 8 => .orr(res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ .xor => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .eor(res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ 8 => .eor(res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ });
+ try rhs_part_mat.finish(isel);
+ try lhs_part_mat.finish(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .shr, .shr_exact, .shl, .shl_exact => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact, .shl_exact => {},
+ .shl => switch (bits) {
+ else => unreachable,
+ 1...31 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 32 => {},
+ 33...63 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 64 => {},
+ },
+ }
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => switch (bits) {
+ else => unreachable,
+ 1...32 => switch (int_info.signedness) {
+ .signed => .asrv(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ .unsigned => .lsrv(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ },
+ 33...64 => switch (int_info.signedness) {
+ .signed => .asrv(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ .unsigned => .lsrv(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ },
+ },
+ .shl, .shl_exact => switch (bits) {
+ else => unreachable,
+ 1...32 => .lslv(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ 33...64 => .lslv(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ },
+ });
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ const res_hi64_ra = try res_hi64_vi.?.defReg(isel);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ const res_lo64_ra = try res_lo64_vi.?.defReg(isel);
+ if (res_hi64_ra == null and res_lo64_ra == null) break :unused;
+ if (res_hi64_ra) |res_ra| switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact, .shl_exact => {},
+ .shl => switch (bits) {
+ else => unreachable,
+ 65...127 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 64 - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 64 - 1),
+ }),
+ }),
+ 128 => {},
+ },
+ };
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const lhs_hi64_mat = lhs_hi64_mat: {
+ const res_lock: RegLock = switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => switch (int_info.signedness) {
+ .signed => if (res_lo64_ra) |res_ra| isel.lockReg(res_ra) else .empty,
+ .unsigned => .empty,
+ },
+ .shl, .shl_exact => .empty,
+ };
+ defer res_lock.unlock(isel);
+ var lhs_hi64_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi64_vi = try lhs_hi64_it.only(isel);
+ break :lhs_hi64_mat try lhs_hi64_vi.?.matReg(isel);
+ };
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ const lhs_lo64_mat = try lhs_lo64_vi.?.matReg(isel);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lo64_ra = lo64_ra: {
+ const res_lock: RegLock = switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => switch (int_info.signedness) {
+ .signed => if (res_lo64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty,
+ .unsigned => .empty,
+ },
+ .shl, .shl_exact => if (res_hi64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty,
+ };
+ defer res_lock.unlock(isel);
+ break :lo64_ra try isel.allocIntReg();
+ };
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra = hi64_ra: {
+ const res_lock: RegLock = switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => if (res_lo64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty,
+ .shl, .shl_exact => .empty,
+ };
+ defer res_lock.unlock(isel);
+ break :hi64_ra try isel.allocIntReg();
+ };
+ defer isel.freeReg(hi64_ra);
+ switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => {
+ if (res_hi64_ra) |res_ra| switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.csel(res_ra.x(), hi64_ra.x(), lo64_ra.x(), .eq));
+ try isel.emit(.sbfm(lo64_ra.x(), lhs_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(bits - 64 - 1),
+ .imms = @intCast(bits - 64 - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.csel(res_ra.x(), hi64_ra.x(), .xzr, .eq)),
+ };
+ if (res_lo64_ra) |res_ra| try isel.emit(.csel(res_ra.x(), lo64_ra.x(), hi64_ra.x(), .eq));
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.asrv(hi64_ra.x(), lhs_hi64_mat.ra.x(), rhs_mat.ra.x())),
+ .unsigned => try isel.emit(.lsrv(hi64_ra.x(), lhs_hi64_mat.ra.x(), rhs_mat.ra.x())),
+ }
+ },
+ .shl, .shl_exact => {
+ if (res_lo64_ra) |res_ra| try isel.emit(.csel(res_ra.x(), lo64_ra.x(), .xzr, .eq));
+ if (res_hi64_ra) |res_ra| try isel.emit(.csel(res_ra.x(), hi64_ra.x(), lo64_ra.x(), .eq));
+ try isel.emit(.lslv(lo64_ra.x(), lhs_lo64_mat.ra.x(), rhs_mat.ra.x()));
+ },
+ }
+ try isel.emit(.ands(.wzr, rhs_mat.ra.w(), .{ .immediate = .{ .N = .word, .immr = 32 - 6, .imms = 0 } }));
+ switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => if (res_lo64_ra) |_| {
+ try isel.emit(.orr(
+ lo64_ra.x(),
+ lo64_ra.x(),
+ .{ .shifted_register = .{ .register = hi64_ra.x(), .shift = .{ .lsl = 1 } } },
+ ));
+ try isel.emit(.lslv(hi64_ra.x(), lhs_hi64_mat.ra.x(), hi64_ra.x()));
+ try isel.emit(.lsrv(lo64_ra.x(), lhs_lo64_mat.ra.x(), rhs_mat.ra.x()));
+ try isel.emit(.orn(hi64_ra.w(), .wzr, .{ .register = rhs_mat.ra.w() }));
+ },
+ .shl, .shl_exact => if (res_hi64_ra) |_| {
+ try isel.emit(.orr(
+ hi64_ra.x(),
+ hi64_ra.x(),
+ .{ .shifted_register = .{ .register = lo64_ra.x(), .shift = .{ .lsr = 1 } } },
+ ));
+ try isel.emit(.lsrv(lo64_ra.x(), lhs_lo64_mat.ra.x(), lo64_ra.x()));
+ try isel.emit(.lslv(hi64_ra.x(), lhs_hi64_mat.ra.x(), rhs_mat.ra.x()));
+ try isel.emit(.orn(lo64_ra.w(), .wzr, .{ .register = rhs_mat.ra.w() }));
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_lo64_mat.finish(isel);
+ try lhs_hi64_mat.finish(isel);
+ break :unused;
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .not => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ const int_info: std.builtin.Type.Int = int_info: {
+ if (ty_op.ty == .bool_type) break :int_info .{ .signedness = .unsigned, .bits = 1 };
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ break :int_info ty.intInfo(zcu);
+ };
+ if (int_info.bits > 128) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const src_vi = try isel.use(ty_op.operand);
+ var offset = res_vi.value.size(isel);
+ while (offset > 0) {
+ const size = @min(offset, 8);
+ offset -= size;
+ var res_part_it = res_vi.value.field(ty, offset, size);
+ const res_part_vi = try res_part_it.only(isel);
+ const res_part_ra = try res_part_vi.?.defReg(isel) orelse continue;
+ var src_part_it = src_vi.field(ty, offset, size);
+ const src_part_vi = try src_part_it.only(isel);
+ const src_part_mat = try src_part_vi.?.matReg(isel);
+ try isel.emit(switch (int_info.signedness) {
+ .signed => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .orn(res_part_ra.w(), .wzr, .{ .register = src_part_mat.ra.w() }),
+ 8 => .orn(res_part_ra.x(), .xzr, .{ .register = src_part_mat.ra.x() }),
+ },
+ .unsigned => switch (@min(int_info.bits - 8 * offset, 64)) {
+ else => unreachable,
+ 1...31 => |bits| .eor(res_part_ra.w(), src_part_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ } }),
+ 32 => .orn(res_part_ra.w(), .wzr, .{ .register = src_part_mat.ra.w() }),
+ 33...63 => |bits| .eor(res_part_ra.x(), src_part_mat.ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ } }),
+ 64 => .orn(res_part_ra.x(), .xzr, .{ .register = src_part_mat.ra.x() }),
+ },
+ });
+ try src_part_mat.finish(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .bitcast => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_tag = dst_ty.zigTypeTag(zcu);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_tag = src_ty.zigTypeTag(zcu);
+ if (dst_ty.isAbiInt(zcu) and (src_tag == .bool or src_ty.isAbiInt(zcu))) {
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_int_info: std.builtin.Type.Int = if (src_tag == .bool) .{ .signedness = undefined, .bits = 1 } else src_ty.intInfo(zcu);
+ assert(dst_int_info.bits == src_int_info.bits);
+ if (dst_tag != .@"struct" and src_tag != .@"struct" and src_tag != .bool and dst_int_info.signedness == src_int_info.signedness) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else switch (dst_int_info.bits) {
+ 0 => unreachable,
+ 1...31 => |dst_bits| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ });
+ try src_mat.finish(isel);
+ },
+ 32 => try dst_vi.value.move(isel, ty_op.operand),
+ 33...63 => |dst_bits| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ });
+ try src_mat.finish(isel);
+ },
+ 64 => try dst_vi.value.move(isel, ty_op.operand),
+ 65...127 => |dst_bits| {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ if (try dst_hi64_vi.?.defReg(isel)) |dst_hi64_ra| {
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ .unsigned => .ubfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ });
+ try src_hi64_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ 128 => try dst_vi.value.move(isel, ty_op.operand),
+ else => return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ } else if ((dst_ty.isPtrAtRuntime(zcu) or dst_ty.isAbiInt(zcu)) and (src_ty.isPtrAtRuntime(zcu) or src_ty.isAbiInt(zcu))) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_ty.isSliceAtRuntime(zcu) and src_ty.isSliceAtRuntime(zcu)) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_tag == .error_union and src_tag == .error_union) {
+ assert(dst_ty.errorUnionSet(zcu).hasRuntimeBitsIgnoreComptime(zcu) ==
+ src_ty.errorUnionSet(zcu).hasRuntimeBitsIgnoreComptime(zcu));
+ if (dst_ty.errorUnionPayload(zcu).toIntern() == src_ty.errorUnionPayload(zcu).toIntern()) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ } else if (dst_tag == .float and src_tag == .float) {
+ assert(dst_ty.floatBits(isel.target) == src_ty.floatBits(isel.target));
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_ty.isAbiInt(zcu) and src_tag == .float) {
+ const dst_int_info = dst_ty.intInfo(zcu);
+ assert(dst_int_info.bits == src_ty.floatBits(isel.target));
+ switch (dst_int_info.bits) {
+ else => unreachable,
+ 16 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ switch (dst_int_info.signedness) {
+ .signed => try isel.emit(.smov(dst_ra.w(), src_mat.ra.@"h[]"(0))),
+ .unsigned => try isel.emit(if (isel.target.cpu.has(.aarch64, .fullfp16))
+ .fmov(dst_ra.w(), .{ .register = src_mat.ra.h() })
+ else
+ .umov(dst_ra.w(), src_mat.ra.@"h[]"(0))),
+ }
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.w(), .{ .register = src_mat.ra.s() }));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.x(), .{ .register = src_mat.ra.d() }));
+ try src_mat.finish(isel);
+ },
+ 80 => switch (dst_int_info.signedness) {
+ .signed => {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ if (try dst_hi16_vi.?.defReg(isel)) |dst_hi16_ra| {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.sbfm(dst_hi16_ra.x(), src_hi16_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = 16 - 1,
+ }));
+ try src_hi16_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ else => try dst_vi.value.move(isel, ty_op.operand),
+ },
+ 128 => {
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ if (try dst_hi64_vi.?.defReg(isel)) |dst_hi64_ra| try isel.emit(.fmov(dst_hi64_ra.x(), .{ .register = src_mat.ra.@"d[]"(1) }));
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| try isel.emit(.fmov(dst_lo64_ra.x(), .{ .register = src_mat.ra.d() }));
+ try src_mat.finish(isel);
+ },
+ }
+ } else if (dst_tag == .float and src_ty.isAbiInt(zcu)) {
+ const src_int_info = src_ty.intInfo(zcu);
+ assert(dst_ty.floatBits(isel.target) == src_int_info.bits);
+ switch (src_int_info.bits) {
+ else => unreachable,
+ 16 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(
+ if (isel.target.cpu.has(.aarch64, .fullfp16)) dst_ra.h() else dst_ra.s(),
+ .{ .register = src_mat.ra.w() },
+ ));
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.s(), .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.d(), .{ .register = src_mat.ra.x() }));
+ try src_mat.finish(isel);
+ },
+ 80 => switch (src_int_info.signedness) {
+ .signed => {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ if (try dst_hi16_vi.?.defReg(isel)) |dst_hi16_ra| {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.ubfm(dst_hi16_ra.x(), src_hi16_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = 16 - 1,
+ }));
+ try src_hi16_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ else => try dst_vi.value.move(isel, ty_op.operand),
+ },
+ 128 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ try isel.emit(.fmov(dst_ra.@"d[]"(1), .{ .register = src_hi64_mat.ra.x() }));
+ try src_hi64_mat.finish(isel);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ try isel.emit(.fmov(dst_ra.d(), .{ .register = src_lo64_mat.ra.x() }));
+ try src_lo64_mat.finish(isel);
+ },
+ }
+ } else if (dst_ty.isAbiInt(zcu) and src_tag == .array and src_ty.childType(zcu).isAbiInt(zcu)) {
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_child_int_info = src_ty.childType(zcu).intInfo(zcu);
+ const src_len = src_ty.arrayLenIncludingSentinel(zcu);
+ assert(dst_int_info.bits == src_child_int_info.bits * src_len);
+ const src_child_size = src_ty.childType(zcu).abiSize(zcu);
+ if (8 * src_child_size == src_child_int_info.bits) {
+ try dst_vi.value.defAddr(isel, dst_ty, dst_int_info, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ try isel.movImmediate(.x2, src_child_size * src_len);
+ try call.paramAddress(isel, src_vi, .r1);
+ try call.paramAddress(isel, dst_vi.value, .r0);
+ try call.finishParams(isel);
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ } else if (dst_tag == .array and dst_ty.childType(zcu).isAbiInt(zcu) and src_ty.isAbiInt(zcu)) {
+ const dst_child_int_info = dst_ty.childType(zcu).intInfo(zcu);
+ const src_int_info = src_ty.intInfo(zcu);
+ const dst_len = dst_ty.arrayLenIncludingSentinel(zcu);
+ assert(dst_child_int_info.bits * dst_len == src_int_info.bits);
+ const dst_child_size = dst_ty.childType(zcu).abiSize(zcu);
+ if (8 * dst_child_size == dst_child_int_info.bits) {
+ try dst_vi.value.defAddr(isel, dst_ty, null, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ try isel.movImmediate(.x2, dst_child_size * dst_len);
+ try call.paramAddress(isel, src_vi, .r1);
+ try call.paramAddress(isel, dst_vi.value, .r0);
+ try call.finishParams(isel);
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .block => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.Block, ty_pl.payload);
+ try isel.block(air.inst_index, ty_pl.ty.toType(), @ptrCast(
+ isel.air.extra.items[extra.end..][0..extra.data.body_len],
+ ));
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .loop => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.Block, ty_pl.payload);
+ const loops = isel.loops.values();
+ const loop_index = isel.loops.getIndex(air.inst_index).?;
+ const loop = &loops[loop_index];
+
+ tracking_log.debug("{f}", .{
+ isel.fmtDom(air.inst_index, loop.dom, @intCast(isel.blocks.count())),
+ });
+ tracking_log.debug("{f}", .{isel.fmtLoopLive(air.inst_index)});
+ assert(loop.depth == isel.blocks.count());
+
+ if (false) {
+ // loops are dumb...
+ for (isel.loop_live.list.items[loop.live..loops[loop_index + 1].live]) |live_inst| {
+ const live_vi = try isel.use(live_inst.toRef());
+ try live_vi.mat(isel);
+ }
+
+ // IT'S DOM TIME!!!
+ for (isel.blocks.values(), 0..) |*dom_block, dom_index| {
+ if (@as(u1, @truncate(isel.dom.items[
+ loop.dom + dom_index / @bitSizeOf(DomInt)
+ ] >> @truncate(dom_index))) == 0) continue;
+ var live_reg_it = dom_block.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
+ _ => |live_vi| try live_vi.mat(isel),
+ .allocating => unreachable,
+ .free => {},
+ };
+ }
+ }
+
+ loop.live_registers = isel.live_registers;
+ loop.repeat_list = Loop.empty_list;
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.merge(&loop.live_registers, .{ .fill_extra = true });
+
+ var repeat_label = loop.repeat_list;
+ assert(repeat_label != Loop.empty_list);
+ while (repeat_label != Loop.empty_list) {
+ const instruction = &isel.instructions.items[repeat_label];
+ const next_repeat_label = instruction.*;
+ instruction.* = .b(-@as(i28, @intCast((isel.instructions.items.len - 1 - repeat_label) << 2)));
+ repeat_label = @bitCast(next_repeat_label);
+ }
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .repeat => {
+ const repeat = air.data(air.inst_index).repeat;
+ try isel.loops.getPtr(repeat.loop_inst).?.branch(isel);
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .br => {
+ const br = air.data(air.inst_index).br;
+ try isel.blocks.getPtr(br.block_inst).?.branch(isel);
+ if (isel.live_values.get(br.block_inst)) |dst_vi| try dst_vi.move(isel, br.operand);
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .trap => {
+ try isel.emit(.brk(0x1));
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .breakpoint => {
+ try isel.emit(.brk(0xf000));
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ret_addr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |addr_vi| unused: {
+ defer addr_vi.value.deref(isel);
+ const addr_ra = try addr_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.ldr(addr_ra.x(), .{ .unsigned_offset = .{ .base = .fp, .offset = 8 } }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .frame_addr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |addr_vi| unused: {
+ defer addr_vi.value.deref(isel);
+ const addr_ra = try addr_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.orr(addr_ra.x(), .xzr, .{ .register = .fp }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .call => {
+ const pl_op = air.data(air.inst_index).pl_op;
+ const extra = isel.air.extraData(Air.Call, pl_op.payload);
+ const args: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0..extra.data.args_len]);
+ const callee_ty = isel.air.typeOf(pl_op.operand, ip);
+ const func_info = switch (ip.indexToKey(callee_ty.toIntern())) {
+ else => unreachable,
+ .func_type => |func_type| func_type,
+ .ptr_type => |ptr_type| ip.indexToKey(ptr_type.child).func_type,
+ };
+
+ try call.prepareReturn(isel);
+ const maybe_def_ret_vi = isel.live_values.fetchRemove(air.inst_index);
+ var maybe_ret_addr_vi: ?Value.Index = null;
+ if (maybe_def_ret_vi) |def_ret_vi| {
+ defer def_ret_vi.value.deref(isel);
+
+ var ret_it: CallAbiIterator = .init;
+ const ret_vi = try ret_it.ret(isel, isel.air.typeOfIndex(air.inst_index, ip));
+ defer ret_vi.?.deref(isel);
+ switch (ret_vi.?.parent(isel)) {
+ .unallocated, .stack_slot => if (ret_vi.?.hint(isel)) |ret_ra| {
+ try call.returnLiveIn(isel, def_ret_vi.value, ret_ra);
+ } else {
+ var def_ret_part_it = def_ret_vi.value.parts(isel);
+ var ret_part_it = ret_vi.?.parts(isel);
+ while (def_ret_part_it.next()) |ret_part_vi| {
+ try call.returnLiveIn(isel, ret_part_vi, ret_part_it.next().?.hint(isel).?);
+ }
+ },
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ maybe_ret_addr_vi = address_vi;
+ _ = try def_ret_vi.value.defAddr(
+ isel,
+ isel.air.typeOfIndex(air.inst_index, ip),
+ null,
+ &call.caller_saved_regs,
+ );
+ },
+ }
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ if (pl_op.operand.toInterned()) |ct_callee| {
+ try isel.nav_relocs.append(gpa, switch (ip.indexToKey(ct_callee)) {
+ else => unreachable,
+ inline .@"extern", .func => |func| .{
+ .nav = func.owner_nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ },
+ .ptr => |ptr| .{
+ .nav = ptr.base_addr.nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ },
+ });
+ try isel.emit(.bl(0));
+ } else {
+ const callee_vi = try isel.use(pl_op.operand);
+ const callee_mat = try callee_vi.matReg(isel);
+ try isel.emit(.blr(callee_mat.ra.x()));
+ try callee_mat.finish(isel);
+ }
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ if (maybe_ret_addr_vi) |ret_addr_vi| try call.paramAddress(
+ isel,
+ maybe_def_ret_vi.?.value,
+ ret_addr_vi.hint(isel).?,
+ );
+ var param_it: CallAbiIterator = .init;
+ for (args, 0..) |arg, arg_index| {
+ const param_ty = isel.air.typeOf(arg, ip);
+ const param_vi = param_vi: {
+ if (arg_index >= func_info.param_types.len) {
+ assert(func_info.is_var_args);
+ switch (isel.va_list) {
+ .other => break :param_vi try param_it.nonSysvVarArg(isel, param_ty),
+ .sysv => {},
+ }
+ }
+ break :param_vi try param_it.param(isel, param_ty);
+ } orelse continue;
+ defer param_vi.deref(isel);
+ const arg_vi = try isel.use(arg);
+ switch (param_vi.parent(isel)) {
+ .unallocated => if (param_vi.hint(isel)) |param_ra| {
+ try call.paramLiveOut(isel, arg_vi, param_ra);
+ } else {
+ var param_part_it = param_vi.parts(isel);
+ var arg_part_it = arg_vi.parts(isel);
+ if (arg_part_it.only()) |_| {
+ try isel.values.ensureUnusedCapacity(gpa, param_part_it.remaining);
+ arg_vi.setParts(isel, param_part_it.remaining);
+ while (param_part_it.next()) |param_part_vi| _ = arg_vi.addPart(
+ isel,
+ param_part_vi.get(isel).offset_from_parent,
+ param_part_vi.size(isel),
+ );
+ param_part_it = param_vi.parts(isel);
+ arg_part_it = arg_vi.parts(isel);
+ }
+ while (param_part_it.next()) |param_part_vi| {
+ const arg_part_vi = arg_part_it.next().?;
+ assert(arg_part_vi.get(isel).offset_from_parent ==
+ param_part_vi.get(isel).offset_from_parent);
+ assert(arg_part_vi.size(isel) == param_part_vi.size(isel));
+ try call.paramLiveOut(isel, arg_part_vi, param_part_vi.hint(isel).?);
+ }
+ },
+ .stack_slot => |stack_slot| try arg_vi.store(isel, param_ty, stack_slot.base, .{
+ .offset = @intCast(stack_slot.offset),
+ }),
+ .value, .constant => unreachable,
+ .address => |address_vi| try call.paramAddress(isel, arg_vi, address_vi.hint(isel).?),
+ }
+ }
+ try call.finishParams(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .clz => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.clzLimb(res_ra, int_info, src_mat.ra);
+ try src_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ const lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi64_ra);
+ try isel.emit(.csel(res_ra.w(), lo64_ra.w(), hi64_ra.w(), .eq));
+ try isel.emit(.add(lo64_ra.w(), lo64_ra.w(), .{ .immediate = @intCast(bits - 64) }));
+ try isel.emit(.subs(.xzr, src_hi64_mat.ra.x(), .{ .immediate = 0 }));
+ try isel.clzLimb(hi64_ra, .{ .signedness = int_info.signedness, .bits = bits - 64 }, src_hi64_mat.ra);
+ try isel.clzLimb(lo64_ra, .{ .signedness = .unsigned, .bits = 64 }, src_lo64_mat.ra);
+ try src_hi64_mat.finish(isel);
+ try src_lo64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ctz => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.ctzLimb(res_ra, int_info, src_mat.ra);
+ try src_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ const lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi64_ra);
+ try isel.emit(.csel(res_ra.w(), lo64_ra.w(), hi64_ra.w(), .ne));
+ try isel.emit(.add(hi64_ra.w(), hi64_ra.w(), .{ .immediate = 64 }));
+ try isel.emit(.subs(.xzr, src_lo64_mat.ra.x(), .{ .immediate = 0 }));
+ try isel.ctzLimb(hi64_ra, .{ .signedness = .unsigned, .bits = 64 }, src_hi64_mat.ra);
+ try isel.ctzLimb(lo64_ra, .{ .signedness = int_info.signedness, .bits = bits - 64 }, src_lo64_mat.ra);
+ try src_hi64_mat.finish(isel);
+ try src_lo64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .popcount => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const vec_ra = try isel.allocVecReg();
+ defer isel.freeReg(vec_ra);
+ try isel.emit(.umov(res_ra.w(), vec_ra.@"b[]"(0)));
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...8 => {},
+ 9...16 => try isel.emit(.addp(vec_ra.@"8b"(), vec_ra.@"8b"(), .{ .vector = vec_ra.@"8b"() })),
+ 17...64 => try isel.emit(.addv(vec_ra.b(), vec_ra.@"8b"())),
+ }
+ try isel.emit(.cnt(vec_ra.@"8b"(), vec_ra.@"8b"()));
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...31 => |bits| switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.fmov(vec_ra.s(), .{ .register = res_ra.w() }));
+ try isel.emit(.ubfm(res_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.fmov(vec_ra.s(), .{ .register = src_mat.ra.w() })),
+ },
+ 32 => try isel.emit(.fmov(vec_ra.s(), .{ .register = src_mat.ra.w() })),
+ 33...63 => |bits| switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.fmov(vec_ra.d(), .{ .register = res_ra.x() }));
+ try isel.emit(.ubfm(res_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.fmov(vec_ra.d(), .{ .register = src_mat.ra.x() })),
+ },
+ 64 => try isel.emit(.fmov(vec_ra.d(), .{ .register = src_mat.ra.x() })),
+ }
+ try src_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .byte_swap => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ if (int_info.bits == 8) break :unused try res_vi.value.move(isel, ty_op.operand);
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ switch (int_info.bits) {
+ else => unreachable,
+ 16 => switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 32 - 16,
+ .imms = 32 - 1,
+ }));
+ try isel.emit(.rev(res_ra.w(), src_mat.ra.w()));
+ },
+ .unsigned => try isel.emit(.rev16(res_ra.w(), src_mat.ra.w())),
+ },
+ 24 => {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 32 - 24,
+ .imms = 32 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 32 - 24,
+ .imms = 32 - 1,
+ })),
+ }
+ try isel.emit(.rev(res_ra.w(), src_mat.ra.w()));
+ },
+ 32 => try isel.emit(.rev(res_ra.w(), src_mat.ra.w())),
+ 40, 48, 56 => |bits| {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ }
+ try isel.emit(.rev(res_ra.x(), src_mat.ra.x()));
+ },
+ 64 => try isel.emit(.rev(res_ra.x(), src_mat.ra.x())),
+ }
+ try src_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .bit_reverse => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...31 => |bits| {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = 32 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = 32 - 1,
+ })),
+ }
+ try isel.emit(.rbit(res_ra.w(), src_mat.ra.w()));
+ },
+ 32 => try isel.emit(.rbit(res_ra.w(), src_mat.ra.w())),
+ 33...63 => |bits| {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ }
+ try isel.emit(.rbit(res_ra.x(), src_mat.ra.x()));
+ },
+ 64 => try isel.emit(.rbit(res_ra.x(), src_mat.ra.x())),
+ }
+ try src_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .sqrt, .floor, .ceil, .round, .trunc_float => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const un_op = air.data(air.inst_index).un_op;
+ const ty = isel.air.typeOf(un_op, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ const src_ra = if (need_fcvt) try isel.allocVecReg() else src_mat.ra;
+ defer if (need_fcvt) isel.freeReg(src_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else switch (air_tag) {
+ else => unreachable,
+ .sqrt => .fsqrt(res_ra.h(), src_ra.h()),
+ .floor => .frintm(res_ra.h(), src_ra.h()),
+ .ceil => .frintp(res_ra.h(), src_ra.h()),
+ .round => .frinta(res_ra.h(), src_ra.h()),
+ .trunc_float => .frintz(res_ra.h(), src_ra.h()),
+ },
+ 32 => switch (air_tag) {
+ else => unreachable,
+ .sqrt => .fsqrt(res_ra.s(), src_ra.s()),
+ .floor => .frintm(res_ra.s(), src_ra.s()),
+ .ceil => .frintp(res_ra.s(), src_ra.s()),
+ .round => .frinta(res_ra.s(), src_ra.s()),
+ .trunc_float => .frintz(res_ra.s(), src_ra.s()),
+ },
+ 64 => switch (air_tag) {
+ else => unreachable,
+ .sqrt => .fsqrt(res_ra.d(), src_ra.d()),
+ .floor => .frintm(res_ra.d(), src_ra.d()),
+ .ceil => .frintp(res_ra.d(), src_ra.d()),
+ .round => .frinta(res_ra.d(), src_ra.d()),
+ .trunc_float => .frintz(res_ra.d(), src_ra.d()),
+ },
+ });
+ if (need_fcvt) try isel.emit(.fcvt(src_ra.s(), src_mat.ra.h()));
+ try src_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (air_tag) {
+ else => unreachable,
+ .sqrt => switch (bits) {
+ else => unreachable,
+ 16 => "__sqrth",
+ 32 => "sqrtf",
+ 64 => "sqrt",
+ 80 => "__sqrtx",
+ 128 => "sqrtq",
+ },
+ .floor => switch (bits) {
+ else => unreachable,
+ 16 => "__floorh",
+ 32 => "floorf",
+ 64 => "floor",
+ 80 => "__floorx",
+ 128 => "floorq",
+ },
+ .ceil => switch (bits) {
+ else => unreachable,
+ 16 => "__ceilh",
+ 32 => "ceilf",
+ 64 => "ceil",
+ 80 => "__ceilx",
+ 128 => "ceilq",
+ },
+ .round => switch (bits) {
+ else => unreachable,
+ 16 => "__roundh",
+ 32 => "roundf",
+ 64 => "round",
+ 80 => "__roundx",
+ 128 => "roundq",
+ },
+ .trunc_float => switch (bits) {
+ else => unreachable,
+ 16 => "__trunch",
+ 32 => "truncf",
+ 64 => "trunc",
+ 80 => "__truncx",
+ 128 => "truncq",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(un_op);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .sin, .cos, .tan, .exp, .exp2, .log, .log2, .log10 => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| {
+ defer res_vi.value.deref(isel);
+
+ const un_op = air.data(air.inst_index).un_op;
+ const ty = isel.air.typeOf(un_op, ip);
+ const bits = ty.floatBits(isel.target);
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (air_tag) {
+ else => unreachable,
+ .sin => switch (bits) {
+ else => unreachable,
+ 16 => "__sinh",
+ 32 => "sinf",
+ 64 => "sin",
+ 80 => "__sinx",
+ 128 => "sinq",
+ },
+ .cos => switch (bits) {
+ else => unreachable,
+ 16 => "__cosh",
+ 32 => "cosf",
+ 64 => "cos",
+ 80 => "__cosx",
+ 128 => "cosq",
+ },
+ .tan => switch (bits) {
+ else => unreachable,
+ 16 => "__tanh",
+ 32 => "tanf",
+ 64 => "tan",
+ 80 => "__tanx",
+ 128 => "tanq",
+ },
+ .exp => switch (bits) {
+ else => unreachable,
+ 16 => "__exph",
+ 32 => "expf",
+ 64 => "exp",
+ 80 => "__expx",
+ 128 => "expq",
+ },
+ .exp2 => switch (bits) {
+ else => unreachable,
+ 16 => "__exp2h",
+ 32 => "exp2f",
+ 64 => "exp2",
+ 80 => "__exp2x",
+ 128 => "exp2q",
+ },
+ .log => switch (bits) {
+ else => unreachable,
+ 16 => "__logh",
+ 32 => "logf",
+ 64 => "log",
+ 80 => "__logx",
+ 128 => "logq",
+ },
+ .log2 => switch (bits) {
+ else => unreachable,
+ 16 => "__log2h",
+ 32 => "log2f",
+ 64 => "log2",
+ 80 => "__log2x",
+ 128 => "log2q",
+ },
+ .log10 => switch (bits) {
+ else => unreachable,
+ 16 => "__log10h",
+ 32 => "log10f",
+ 64 => "log10",
+ 80 => "__log10x",
+ 128 => "log10q",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(un_op);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .abs => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ switch (ty.intInfo(zcu).bits) {
+ 0 => unreachable,
+ 1...32 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.csneg(res_ra.w(), src_mat.ra.w(), src_mat.ra.w(), .pl));
+ try isel.emit(.subs(.wzr, src_mat.ra.w(), .{ .immediate = 0 }));
+ try src_mat.finish(isel);
+ },
+ 33...64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.csneg(res_ra.x(), src_mat.ra.x(), src_mat.ra.x(), .pl));
+ try isel.emit(.subs(.xzr, src_mat.ra.x(), .{ .immediate = 0 }));
+ try src_mat.finish(isel);
+ },
+ 65...128 => {
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ const res_hi64_ra = try res_hi64_vi.?.defReg(isel);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ const res_lo64_ra = try res_lo64_vi.?.defReg(isel);
+ if (res_hi64_ra == null and res_lo64_ra == null) break :unused;
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ const lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra, const mask_ra = alloc_ras: {
+ const res_lo64_lock: RegLock = if (res_lo64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty;
+ defer res_lo64_lock.unlock(isel);
+ break :alloc_ras .{ try isel.allocIntReg(), try isel.allocIntReg() };
+ };
+ defer {
+ isel.freeReg(hi64_ra);
+ isel.freeReg(mask_ra);
+ }
+ if (res_hi64_ra) |res_ra| try isel.emit(.sbc(res_ra.x(), hi64_ra.x(), mask_ra.x()));
+ try isel.emit(.subs(
+ if (res_lo64_ra) |res_ra| res_ra.x() else .xzr,
+ lo64_ra.x(),
+ .{ .register = mask_ra.x() },
+ ));
+ if (res_hi64_ra) |_| try isel.emit(.eor(hi64_ra.x(), src_hi64_mat.ra.x(), .{ .register = mask_ra.x() }));
+ try isel.emit(.eor(lo64_ra.x(), src_lo64_mat.ra.x(), .{ .register = mask_ra.x() }));
+ try isel.emit(.sbfm(mask_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 64 - 1,
+ .imms = 64 - 1,
+ }));
+ try src_lo64_mat.finish(isel);
+ try src_hi64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (isel.target.cpu.has(.aarch64, .fullfp16))
+ .fabs(res_ra.h(), src_mat.ra.h())
+ else
+ .bic(res_ra.@"4h"(), res_ra.@"4h"(), .{ .shifted_immediate = .{
+ .immediate = 0b10000000,
+ .lsl = 8,
+ } }));
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fabs(res_ra.s(), src_mat.ra.s()));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fabs(res_ra.d(), src_mat.ra.d()));
+ try src_mat.finish(isel);
+ },
+ 80 => {
+ const src_vi = try isel.use(ty_op.operand);
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ if (try res_hi16_vi.?.defReg(isel)) |res_hi16_ra| {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.@"and"(res_hi16_ra.w(), src_hi16_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = 15 - 1,
+ } }));
+ try src_hi16_mat.finish(isel);
+ }
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ if (try res_lo64_vi.?.defReg(isel)) |res_lo64_ra| {
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, res_lo64_ra);
+ }
+ },
+ 128 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const neg_zero_ra = try isel.allocVecReg();
+ defer isel.freeReg(neg_zero_ra);
+ try isel.emit(.bic(res_ra.@"16b"(), src_mat.ra.@"16b"(), .{ .register = neg_zero_ra.@"16b"() }));
+ try isel.literals.appendNTimes(gpa, 0, -%isel.literals.items.len % 4);
+ try isel.literal_relocs.append(gpa, .{
+ .label = @intCast(isel.instructions.items.len),
+ });
+ try isel.emit(.ldr(neg_zero_ra.q(), .{
+ .literal = @intCast((isel.instructions.items.len + 1 + isel.literals.items.len) << 2),
+ }));
+ try isel.emitLiteral(&(.{0} ** 15 ++ .{0x80}));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .neg, .neg_optimized => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const un_op = air.data(air.inst_index).un_op;
+ const ty = isel.air.typeOf(un_op, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ if (isel.target.cpu.has(.aarch64, .fullfp16)) {
+ try isel.emit(.fneg(res_ra.h(), src_mat.ra.h()));
+ } else {
+ const neg_zero_ra = try isel.allocVecReg();
+ defer isel.freeReg(neg_zero_ra);
+ try isel.emit(.eor(res_ra.@"8b"(), res_ra.@"8b"(), .{ .register = neg_zero_ra.@"8b"() }));
+ try isel.emit(.movi(neg_zero_ra.@"4h"(), 0b10000000, .{ .lsl = 8 }));
+ }
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fneg(res_ra.s(), src_mat.ra.s()));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fneg(res_ra.d(), src_mat.ra.d()));
+ try src_mat.finish(isel);
+ },
+ 80 => {
+ const src_vi = try isel.use(un_op);
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ if (try res_hi16_vi.?.defReg(isel)) |res_hi16_ra| {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.eor(res_hi16_ra.w(), src_hi16_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 32 - 15,
+ .imms = 1 - 1,
+ } }));
+ try src_hi16_mat.finish(isel);
+ }
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ if (try res_lo64_vi.?.defReg(isel)) |res_lo64_ra| {
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, res_lo64_ra);
+ }
+ },
+ 128 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ const neg_zero_ra = try isel.allocVecReg();
+ defer isel.freeReg(neg_zero_ra);
+ try isel.emit(.eor(res_ra.@"16b"(), src_mat.ra.@"16b"(), .{ .register = neg_zero_ra.@"16b"() }));
+ try isel.literals.appendNTimes(gpa, 0, -%isel.literals.items.len % 4);
+ try isel.literal_relocs.append(gpa, .{
+ .label = @intCast(isel.instructions.items.len),
+ });
+ try isel.emit(.ldr(neg_zero_ra.q(), .{
+ .literal = @intCast((isel.instructions.items.len + 1 + isel.literals.items.len) << 2),
+ }));
+ try isel.emitLiteral(&(.{0} ** 15 ++ .{0x80}));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ var bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ const int_info: std.builtin.Type.Int = if (ty.toIntern() == .bool_type)
+ .{ .signedness = .unsigned, .bits = 1 }
+ else if (ty.isAbiInt(zcu))
+ ty.intInfo(zcu)
+ else if (ty.isPtrAtRuntime(zcu))
+ .{ .signedness = .unsigned, .bits = 64 }
+ else
+ return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ if (int_info.bits > 256) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.csinc(res_ra.w(), .wzr, .wzr, .invert(cond: switch (air_tag) {
+ else => unreachable,
+ .cmp_lt => switch (int_info.signedness) {
+ .signed => .lt,
+ .unsigned => .lo,
+ },
+ .cmp_lte => switch (int_info.bits) {
+ else => unreachable,
+ 1...64 => switch (int_info.signedness) {
+ .signed => .le,
+ .unsigned => .ls,
+ },
+ 65...128 => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_gte;
+ },
+ },
+ .cmp_eq => .eq,
+ .cmp_gte => switch (int_info.signedness) {
+ .signed => .ge,
+ .unsigned => .hs,
+ },
+ .cmp_gt => switch (int_info.bits) {
+ else => unreachable,
+ 1...64 => switch (int_info.signedness) {
+ .signed => .gt,
+ .unsigned => .hi,
+ },
+ 65...128 => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_lt;
+ },
+ },
+ .cmp_neq => .ne,
+ })));
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ var part_offset = lhs_vi.size(isel);
+ while (part_offset > 0) {
+ const part_size = @min(part_offset, 8);
+ part_offset -= part_size;
+ var lhs_part_it = lhs_vi.field(ty, part_offset, part_size);
+ const lhs_part_vi = try lhs_part_it.only(isel);
+ const lhs_part_mat = try lhs_part_vi.?.matReg(isel);
+ var rhs_part_it = rhs_vi.field(ty, part_offset, part_size);
+ const rhs_part_vi = try rhs_part_it.only(isel);
+ const rhs_part_mat = try rhs_part_vi.?.matReg(isel);
+ try isel.emit(switch (part_size) {
+ else => unreachable,
+ 1...4 => switch (part_offset) {
+ 0 => .subs(.wzr, lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ else => switch (air_tag) {
+ else => unreachable,
+ .cmp_lt, .cmp_lte, .cmp_gte, .cmp_gt => .sbcs(
+ .wzr,
+ lhs_part_mat.ra.w(),
+ rhs_part_mat.ra.w(),
+ ),
+ .cmp_eq, .cmp_neq => .ccmp(
+ lhs_part_mat.ra.w(),
+ .{ .register = rhs_part_mat.ra.w() },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .eq,
+ ),
+ },
+ },
+ 5...8 => switch (part_offset) {
+ 0 => .subs(.xzr, lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ else => switch (air_tag) {
+ else => unreachable,
+ .cmp_lt, .cmp_lte, .cmp_gte, .cmp_gt => .sbcs(
+ .xzr,
+ lhs_part_mat.ra.x(),
+ rhs_part_mat.ra.x(),
+ ),
+ .cmp_eq, .cmp_neq => .ccmp(
+ lhs_part_mat.ra.x(),
+ .{ .register = rhs_part_mat.ra.x() },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .eq,
+ ),
+ },
+ },
+ });
+ try rhs_part_mat.finish(isel);
+ try lhs_part_mat.finish(isel);
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ try isel.emit(.csinc(res_ra.w(), .wzr, .wzr, .invert(switch (air_tag) {
+ else => unreachable,
+ .cmp_lt => .lo,
+ .cmp_lte => .ls,
+ .cmp_eq => .eq,
+ .cmp_gte => .ge,
+ .cmp_gt => .gt,
+ .cmp_neq => .ne,
+ })));
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fcmp(lhs_ra.h(), .{ .register = rhs_ra.h() }),
+ 32 => .fcmp(lhs_ra.s(), .{ .register = rhs_ra.s() }),
+ 64 => .fcmp(lhs_ra.d(), .{ .register = rhs_ra.d() }),
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.returnFill(isel, .r0);
+ try isel.emit(.csinc(res_ra.w(), .wzr, .wzr, .invert(cond: switch (air_tag) {
+ else => unreachable,
+ .cmp_lt => .lt,
+ .cmp_lte => .le,
+ .cmp_eq => .eq,
+ .cmp_gte => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_lte;
+ },
+ .cmp_gt => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_lt;
+ },
+ .cmp_neq => .ne,
+ })));
+ try isel.emit(.subs(.wzr, .w0, .{ .immediate = 0 }));
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__cmphf2",
+ 32 => "__cmpsf2",
+ 64 => "__cmpdf2",
+ 80 => "__cmpxf2",
+ 128 => "__cmptf2",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .cond_br => {
+ const pl_op = air.data(air.inst_index).pl_op;
+ const extra = isel.air.extraData(Air.CondBr, pl_op.payload);
+
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]));
+ const else_label = isel.instructions.items.len;
+ const else_live_registers = isel.live_registers;
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.then_body_len]));
+ try isel.merge(&else_live_registers, .{});
+
+ const cond_vi = try isel.use(pl_op.operand);
+ const cond_mat = try cond_vi.matReg(isel);
+ try isel.emit(.tbz(
+ cond_mat.ra.x(),
+ 0,
+ @intCast((isel.instructions.items.len + 1 - else_label) << 2),
+ ));
+ try cond_mat.finish(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .switch_br => {
+ const switch_br = isel.air.unwrapSwitch(air.inst_index);
+ const cond_ty = isel.air.typeOf(switch_br.operand, ip);
+ const cond_int_info: std.builtin.Type.Int = if (cond_ty.toIntern() == .bool_type)
+ .{ .signedness = .unsigned, .bits = 1 }
+ else if (cond_ty.isAbiInt(zcu))
+ cond_ty.intInfo(zcu)
+ else
+ return isel.fail("bad switch cond {f}", .{isel.fmtType(cond_ty)});
+
+ var final_case = true;
+ if (switch_br.else_body_len > 0) {
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |_| {}
+ try isel.body(cases_it.elseBody());
+ assert(final_case);
+ final_case = false;
+ }
+ const zero_reg: Register = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => .wzr,
+ 33...64 => .xzr,
+ };
+ var cond_mat: ?Value.Materialize = null;
+ var cond_reg: Register = undefined;
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |case| {
+ const next_label = isel.instructions.items.len;
+ const next_live_registers = isel.live_registers;
+ try isel.body(case.body);
+ if (final_case) {
+ final_case = false;
+ continue;
+ }
+ try isel.merge(&next_live_registers, .{});
+ if (cond_mat == null) {
+ var cond_vi = try isel.use(switch_br.operand);
+ cond_mat = try cond_vi.matReg(isel);
+ cond_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => cond_mat.?.ra.w(),
+ 33...64 => cond_mat.?.ra.x(),
+ };
+ }
+ if (case.ranges.len == 0 and case.items.len == 1 and Constant.fromInterned(
+ case.items[0].toInterned().?,
+ ).orderAgainstZero(zcu).compare(.eq)) {
+ try isel.emit(.cbnz(
+ cond_reg,
+ @intCast((isel.instructions.items.len + 1 - next_label) << 2),
+ ));
+ continue;
+ }
+ try isel.emit(.@"b."(
+ .invert(switch (case.ranges.len) {
+ 0 => .eq,
+ else => .ls,
+ }),
+ @intCast((isel.instructions.items.len + 1 - next_label) << 2),
+ ));
+ var case_range_index = case.ranges.len;
+ while (case_range_index > 0) {
+ case_range_index -= 1;
+
+ const low_val: Constant = .fromInterned(case.ranges[case_range_index][0].toInterned().?);
+ var low_bigint_space: Constant.BigIntSpace = undefined;
+ const low_bigint = low_val.toBigInt(&low_bigint_space, zcu);
+ const low_int: i64 = if (low_bigint.positive) @bitCast(
+ low_bigint.toInt(u64) catch
+ return isel.fail("too big case range start: {f}", .{isel.fmtConstant(low_val)}),
+ ) else low_bigint.toInt(i64) catch
+ return isel.fail("too big case range start: {f}", .{isel.fmtConstant(low_val)});
+
+ const high_val: Constant = .fromInterned(case.ranges[case_range_index][1].toInterned().?);
+ var high_bigint_space: Constant.BigIntSpace = undefined;
+ const high_bigint = high_val.toBigInt(&high_bigint_space, zcu);
+ const high_int: i64 = if (high_bigint.positive) @bitCast(
+ high_bigint.toInt(u64) catch
+ return isel.fail("too big case range end: {f}", .{isel.fmtConstant(high_val)}),
+ ) else high_bigint.toInt(i64) catch
+ return isel.fail("too big case range end: {f}", .{isel.fmtConstant(high_val)});
+
+ const adjusted_ra = switch (low_int) {
+ 0 => cond_mat.?.ra,
+ else => try isel.allocIntReg(),
+ };
+ defer if (adjusted_ra != cond_mat.?.ra) isel.freeReg(adjusted_ra);
+ const adjusted_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => adjusted_ra.w(),
+ 33...64 => adjusted_ra.x(),
+ };
+ const delta_int = high_int -% low_int;
+ if (case_range_index | case.items.len > 0) {
+ if (std.math.cast(u5, delta_int)) |pos_imm| try isel.emit(.ccmp(
+ adjusted_reg,
+ .{ .immediate = pos_imm },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ if (case_range_index > 0) .hi else .ne,
+ )) else if (std.math.cast(u5, -delta_int)) |neg_imm| try isel.emit(.ccmn(
+ adjusted_reg,
+ .{ .immediate = neg_imm },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ if (case_range_index > 0) .hi else .ne,
+ )) else {
+ const imm_ra = try isel.allocIntReg();
+ defer isel.freeReg(imm_ra);
+ const imm_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => imm_ra.w(),
+ 33...64 => imm_ra.x(),
+ };
+ try isel.emit(.ccmp(
+ cond_reg,
+ .{ .register = imm_reg },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ if (case_range_index > 0) .hi else .ne,
+ ));
+ try isel.movImmediate(imm_reg, @bitCast(delta_int));
+ }
+ } else {
+ if (std.math.cast(u12, delta_int)) |pos_imm| try isel.emit(.subs(
+ zero_reg,
+ adjusted_reg,
+ .{ .immediate = pos_imm },
+ )) else if (std.math.cast(u12, -delta_int)) |neg_imm| try isel.emit(.adds(
+ zero_reg,
+ adjusted_reg,
+ .{ .immediate = neg_imm },
+ )) else if (if (@as(i12, @truncate(delta_int)) == 0)
+ std.math.cast(u12, delta_int >> 12)
+ else
+ null) |pos_imm_lsr_12| try isel.emit(.subs(
+ zero_reg,
+ adjusted_reg,
+ .{ .shifted_immediate = .{ .immediate = pos_imm_lsr_12, .lsl = .@"12" } },
+ )) else if (if (@as(i12, @truncate(-delta_int)) == 0)
+ std.math.cast(u12, -delta_int >> 12)
+ else
+ null) |neg_imm_lsr_12| try isel.emit(.adds(
+ zero_reg,
+ adjusted_reg,
+ .{ .shifted_immediate = .{ .immediate = neg_imm_lsr_12, .lsl = .@"12" } },
+ )) else {
+ const imm_ra = try isel.allocIntReg();
+ defer isel.freeReg(imm_ra);
+ const imm_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => imm_ra.w(),
+ 33...64 => imm_ra.x(),
+ };
+ try isel.emit(.subs(zero_reg, adjusted_reg, .{ .register = imm_reg }));
+ try isel.movImmediate(imm_reg, @bitCast(delta_int));
+ }
+ }
+
+ switch (low_int) {
+ 0 => {},
+ else => {
+ if (std.math.cast(u12, low_int)) |pos_imm| try isel.emit(.sub(
+ adjusted_reg,
+ cond_reg,
+ .{ .immediate = pos_imm },
+ )) else if (std.math.cast(u12, -low_int)) |neg_imm| try isel.emit(.add(
+ adjusted_reg,
+ cond_reg,
+ .{ .immediate = neg_imm },
+ )) else if (if (@as(i12, @truncate(low_int)) == 0)
+ std.math.cast(u12, low_int >> 12)
+ else
+ null) |pos_imm_lsr_12| try isel.emit(.sub(
+ adjusted_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = pos_imm_lsr_12, .lsl = .@"12" } },
+ )) else if (if (@as(i12, @truncate(-low_int)) == 0)
+ std.math.cast(u12, -low_int >> 12)
+ else
+ null) |neg_imm_lsr_12| try isel.emit(.add(
+ adjusted_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = neg_imm_lsr_12, .lsl = .@"12" } },
+ )) else {
+ const imm_ra = try isel.allocIntReg();
+ defer isel.freeReg(imm_ra);
+ const imm_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => imm_ra.w(),
+ 33...64 => imm_ra.x(),
+ };
+ try isel.emit(.sub(adjusted_reg, cond_reg, .{ .register = imm_reg }));
+ try isel.movImmediate(imm_reg, @bitCast(low_int));
+ }
+ },
+ }
+ }
+ var case_item_index = case.items.len;
+ while (case_item_index > 0) {
+ case_item_index -= 1;
+
+ const item_val: Constant = .fromInterned(case.items[case_item_index].toInterned().?);
+ var item_bigint_space: Constant.BigIntSpace = undefined;
+ const item_bigint = item_val.toBigInt(&item_bigint_space, zcu);
+ const item_int: i64 = if (item_bigint.positive) @bitCast(
+ item_bigint.toInt(u64) catch
+ return isel.fail("too big case item: {f}", .{isel.fmtConstant(item_val)}),
+ ) else item_bigint.toInt(i64) catch
+ return isel.fail("too big case item: {f}", .{isel.fmtConstant(item_val)});
+
+ if (case_item_index > 0) {
+ if (std.math.cast(u5, item_int)) |pos_imm| try isel.emit(.ccmp(
+ cond_reg,
+ .{ .immediate = pos_imm },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ .ne,
+ )) else if (std.math.cast(u5, -item_int)) |neg_imm| try isel.emit(.ccmn(
+ cond_reg,
+ .{ .immediate = neg_imm },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ .ne,
+ )) else {
+ const imm_ra = try isel.allocIntReg();
+ defer isel.freeReg(imm_ra);
+ const imm_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => imm_ra.w(),
+ 33...64 => imm_ra.x(),
+ };
+ try isel.emit(.ccmp(
+ cond_reg,
+ .{ .register = imm_reg },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ .ne,
+ ));
+ try isel.movImmediate(imm_reg, @bitCast(item_int));
+ }
+ } else {
+ if (std.math.cast(u12, item_int)) |pos_imm| try isel.emit(.subs(
+ zero_reg,
+ cond_reg,
+ .{ .immediate = pos_imm },
+ )) else if (std.math.cast(u12, -item_int)) |neg_imm| try isel.emit(.adds(
+ zero_reg,
+ cond_reg,
+ .{ .immediate = neg_imm },
+ )) else if (if (@as(i12, @truncate(item_int)) == 0)
+ std.math.cast(u12, item_int >> 12)
+ else
+ null) |pos_imm_lsr_12| try isel.emit(.subs(
+ zero_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = pos_imm_lsr_12, .lsl = .@"12" } },
+ )) else if (if (@as(i12, @truncate(-item_int)) == 0)
+ std.math.cast(u12, -item_int >> 12)
+ else
+ null) |neg_imm_lsr_12| try isel.emit(.adds(
+ zero_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = neg_imm_lsr_12, .lsl = .@"12" } },
+ )) else {
+ const imm_ra = try isel.allocIntReg();
+ defer isel.freeReg(imm_ra);
+ const imm_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => imm_ra.w(),
+ 33...64 => imm_ra.x(),
+ };
+ try isel.emit(.subs(zero_reg, cond_reg, .{ .register = imm_reg }));
+ try isel.movImmediate(imm_reg, @bitCast(item_int));
+ }
+ }
+ }
+ }
+ if (cond_mat) |mat| try mat.finish(isel);
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .@"try", .try_cold => {
+ const pl_op = air.data(air.inst_index).pl_op;
+ const extra = isel.air.extraData(Air.Try, pl_op.payload);
+ const error_union_ty = isel.air.typeOf(pl_op.operand, ip);
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+
+ const error_union_vi = try isel.use(pl_op.operand);
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_vi| {
+ defer payload_vi.value.deref(isel);
+
+ var payload_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionPayloadOffset(payload_ty, zcu),
+ payload_vi.value.size(isel),
+ );
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_vi.value.copy(isel, payload_ty, payload_part_vi.?);
+ }
+
+ const cont_label = isel.instructions.items.len;
+ const cont_live_registers = isel.live_registers;
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.merge(&cont_live_registers, .{});
+
+ var error_set_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionErrorOffset(payload_ty, zcu),
+ ZigType.fromInterned(error_union_info.error_set_type).abiSize(zcu),
+ );
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ const error_set_part_mat = try error_set_part_vi.?.matReg(isel);
+ try isel.emit(.cbz(
+ error_set_part_mat.ra.w(),
+ @intCast((isel.instructions.items.len + 1 - cont_label) << 2),
+ ));
+ try error_set_part_mat.finish(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .try_ptr, .try_ptr_cold => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.TryPtr, ty_pl.payload);
+ const error_union_ty = isel.air.typeOf(extra.data.ptr, ip).childType(zcu);
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+
+ const error_union_ptr_vi = try isel.use(extra.data.ptr);
+ const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_ptr_vi| unused: {
+ defer payload_ptr_vi.value.deref(isel);
+ switch (codegen.errUnionPayloadOffset(ty_pl.ty.toType().childType(zcu), zcu)) {
+ 0 => try payload_ptr_vi.value.move(isel, extra.data.ptr),
+ else => |payload_offset| {
+ const payload_ptr_ra = try payload_ptr_vi.value.defReg(isel) orelse break :unused;
+ const lo12: u12 = @truncate(payload_offset >> 0);
+ const hi12: u12 = @intCast(payload_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ payload_ptr_ra.x(),
+ if (lo12 > 0) payload_ptr_ra.x() else error_union_ptr_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(payload_ptr_ra.x(), error_union_ptr_mat.ra.x(), .{ .immediate = lo12 }));
+ },
+ }
+ }
+
+ const cont_label = isel.instructions.items.len;
+ const cont_live_registers = isel.live_registers;
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.merge(&cont_live_registers, .{});
+
+ const error_set_ra = try isel.allocIntReg();
+ defer isel.freeReg(error_set_ra);
+ try isel.loadReg(
+ error_set_ra,
+ ZigType.fromInterned(error_union_info.error_set_type).abiSize(zcu),
+ .unsigned,
+ error_union_ptr_mat.ra,
+ codegen.errUnionErrorOffset(payload_ty, zcu),
+ );
+ try error_union_ptr_mat.finish(isel);
+ try isel.emit(.cbz(
+ error_set_ra.w(),
+ @intCast((isel.instructions.items.len + 1 - cont_label) << 2),
+ ));
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .dbg_stmt => if (air.next()) |next_air_tag| continue :air_tag next_air_tag,
+ .dbg_empty_stmt => {
+ try isel.emit(.nop());
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .dbg_inline_block => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
+ try isel.block(air.inst_index, ty_pl.ty.toType(), @ptrCast(
+ isel.air.extra.items[extra.end..][0..extra.data.body_len],
+ ));
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .dbg_var_ptr, .dbg_var_val, .dbg_arg_inline => {
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .is_null, .is_non_null => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |is_vi| unused: {
+ defer is_vi.value.deref(isel);
+ const is_ra = try is_vi.value.defReg(isel) orelse break :unused;
+
+ const un_op = air.data(air.inst_index).un_op;
+ const opt_ty = isel.air.typeOf(un_op, ip);
+ const payload_ty = opt_ty.optionalChild(zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+ const has_value_offset, const has_value_size = if (!opt_ty.optionalReprIsPayload(zcu))
+ .{ payload_size, 1 }
+ else if (payload_ty.isSlice(zcu))
+ .{ 0, 8 }
+ else
+ .{ 0, payload_size };
+
+ try isel.emit(.csinc(is_ra.w(), .wzr, .wzr, .invert(switch (air_tag) {
+ else => unreachable,
+ .is_null => .eq,
+ .is_non_null => .ne,
+ })));
+ const opt_vi = try isel.use(un_op);
+ var has_value_part_it = opt_vi.field(opt_ty, has_value_offset, has_value_size);
+ const has_value_part_vi = try has_value_part_it.only(isel);
+ const has_value_part_mat = try has_value_part_vi.?.matReg(isel);
+ try isel.emit(switch (has_value_size) {
+ else => unreachable,
+ 1...4 => .subs(.wzr, has_value_part_mat.ra.w(), .{ .immediate = 0 }),
+ 5...8 => .subs(.xzr, has_value_part_mat.ra.x(), .{ .immediate = 0 }),
+ });
+ try has_value_part_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .is_err, .is_non_err => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |is_vi| unused: {
+ defer is_vi.value.deref(isel);
+ const is_ra = try is_vi.value.defReg(isel) orelse break :unused;
+
+ const un_op = air.data(air.inst_index).un_op;
+ const error_union_ty = isel.air.typeOf(un_op, ip);
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const error_set_ty: ZigType = .fromInterned(error_union_info.error_set_type);
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const error_set_size = error_set_ty.abiSize(zcu);
+
+ try isel.emit(.csinc(is_ra.w(), .wzr, .wzr, .invert(switch (air_tag) {
+ else => unreachable,
+ .is_err => .ne,
+ .is_non_err => .eq,
+ })));
+ const error_union_vi = try isel.use(un_op);
+ var error_set_part_it = error_union_vi.field(error_union_ty, error_set_offset, error_set_size);
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ const error_set_part_mat = try error_set_part_vi.?.matReg(isel);
+ try isel.emit(.ands(.wzr, error_set_part_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(8 * error_set_size - 1),
+ } }));
+ try error_set_part_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .load => {
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ptr_ty = isel.air.typeOf(ty_op.operand, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed load", .{});
+
+ if (ptr_info.flags.is_volatile) _ = try isel.use(air.inst_index.toRef());
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ switch (dst_vi.value.size(isel)) {
+ 0 => unreachable,
+ 1...Value.max_parts => {
+ const ptr_vi = try isel.use(ty_op.operand);
+ const ptr_mat = try ptr_vi.matReg(isel);
+ _ = try dst_vi.value.load(isel, ty_op.ty.toType(), ptr_mat.ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ });
+ try ptr_mat.finish(isel);
+ },
+ else => |size| {
+ try dst_vi.value.defAddr(isel, .fromInterned(ptr_info.child), null, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const ptr_vi = try isel.use(ty_op.operand);
+ try isel.movImmediate(.x2, size);
+ try call.paramLiveOut(isel, ptr_vi, .r1);
+ try call.paramAddress(isel, dst_vi.value, .r0);
+ try call.finishParams(isel);
+ },
+ }
+ }
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ret, .ret_safe => {
+ assert(isel.blocks.keys()[0] == Block.main);
+ try isel.blocks.values()[0].branch(isel);
+ if (isel.live_values.get(Block.main)) |ret_vi| {
+ const un_op = air.data(air.inst_index).un_op;
+ const src_vi = try isel.use(un_op);
+ switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => if (ret_vi.hint(isel)) |ret_ra| {
+ try src_vi.liveOut(isel, ret_ra);
+ } else {
+ var ret_part_it = ret_vi.parts(isel);
+ var src_part_it = src_vi.parts(isel);
+ if (src_part_it.only()) |_| {
+ try isel.values.ensureUnusedCapacity(gpa, ret_part_it.remaining);
+ src_vi.setParts(isel, ret_part_it.remaining);
+ while (ret_part_it.next()) |ret_part_vi| {
+ const src_part_vi = src_vi.addPart(
+ isel,
+ ret_part_vi.get(isel).offset_from_parent,
+ ret_part_vi.size(isel),
+ );
+ switch (ret_part_vi.signedness(isel)) {
+ .signed => src_part_vi.setSignedness(isel, .signed),
+ .unsigned => {},
+ }
+ if (ret_part_vi.isVector(isel)) src_part_vi.setIsVector(isel);
+ }
+ ret_part_it = ret_vi.parts(isel);
+ src_part_it = src_vi.parts(isel);
+ }
+ while (ret_part_it.next()) |ret_part_vi| {
+ const src_part_vi = src_part_it.next().?;
+ assert(ret_part_vi.get(isel).offset_from_parent == src_part_vi.get(isel).offset_from_parent);
+ assert(ret_part_vi.size(isel) == src_part_vi.size(isel));
+ try src_part_vi.liveOut(isel, ret_part_vi.hint(isel).?);
+ }
+ },
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ const ptr_mat = try address_vi.matReg(isel);
+ try src_vi.store(isel, isel.air.typeOf(un_op, ip), ptr_mat.ra, .{});
+ try ptr_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ret_load => {
+ const un_op = air.data(air.inst_index).un_op;
+ const ptr_ty = isel.air.typeOf(un_op, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed load", .{});
+
+ assert(isel.blocks.keys()[0] == Block.main);
+ try isel.blocks.values()[0].branch(isel);
+ if (isel.live_values.get(Block.main)) |ret_vi| switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => {
+ var ret_part_it: Value.PartIterator = if (ret_vi.hint(isel)) |_| .initOne(ret_vi) else ret_vi.parts(isel);
+ while (ret_part_it.next()) |ret_part_vi| try ret_part_vi.liveOut(isel, ret_part_vi.hint(isel).?);
+ const ptr_vi = try isel.use(un_op);
+ const ptr_mat = try ptr_vi.matReg(isel);
+ _ = try ret_vi.load(isel, .fromInterned(ptr_info.child), ptr_mat.ra, .{});
+ try ptr_mat.finish(isel);
+ },
+ .value, .constant => unreachable,
+ .address => {},
+ };
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .store, .store_safe, .atomic_store_unordered => {
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ptr_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed store", .{});
+ if (bin_op.rhs.toInterned()) |rhs_val| if (ip.isUndef(rhs_val))
+ break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+
+ const src_vi = try isel.use(bin_op.rhs);
+ const size = src_vi.size(isel);
+ if (ZigType.fromInterned(ptr_info.child).zigTypeTag(zcu) != .@"union") switch (size) {
+ 0 => unreachable,
+ 1...Value.max_parts => {
+ const ptr_vi = try isel.use(bin_op.lhs);
+ const ptr_mat = try ptr_vi.matReg(isel);
+ try src_vi.store(isel, isel.air.typeOf(bin_op.rhs, ip), ptr_mat.ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ });
+ try ptr_mat.finish(isel);
+
+ break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ else => {},
+ };
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const ptr_vi = try isel.use(bin_op.lhs);
+ try isel.movImmediate(.x2, size);
+ try call.paramAddress(isel, src_vi, .r1);
+ try call.paramLiveOut(isel, ptr_vi, .r0);
+ try call.finishParams(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .unreach => if (air.next()) |next_air_tag| continue :air_tag next_air_tag,
+ .fptrunc, .fpext => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_bits = dst_ty.floatBits(isel.target);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_bits = src_ty.floatBits(isel.target);
+ assert(dst_bits != src_bits);
+ switch (@max(dst_bits, src_bits)) {
+ else => unreachable,
+ 16, 32, 64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fcvt(switch (dst_bits) {
+ else => unreachable,
+ 16 => dst_ra.h(),
+ 32 => dst_ra.s(),
+ 64 => dst_ra.d(),
+ }, switch (src_bits) {
+ else => unreachable,
+ 16 => src_mat.ra.h(),
+ 32 => src_mat.ra.s(),
+ 64 => src_mat.ra.d(),
+ }));
+ try src_mat.finish(isel);
+ },
+ 80, 128 => {
+ try call.prepareReturn(isel);
+ switch (dst_bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, dst_vi.value, .v0),
+ 80 => {
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ try call.returnLiveIn(isel, dst_hi16_vi.?, .r1);
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ try call.returnLiveIn(isel, dst_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (dst_bits) {
+ else => unreachable,
+ 16 => switch (src_bits) {
+ else => unreachable,
+ 32 => "__truncsfhf2",
+ 64 => "__truncdfhf2",
+ 80 => "__truncxfhf2",
+ 128 => "__trunctfhf2",
+ },
+ 32 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhfsf2",
+ 64 => "__truncdfsf2",
+ 80 => "__truncxfsf2",
+ 128 => "__trunctfsf2",
+ },
+ 64 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhfdf2",
+ 32 => "__extendsfdf2",
+ 80 => "__truncxfdf2",
+ 128 => "__trunctfdf2",
+ },
+ 80 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhfxf2",
+ 32 => "__extendsfxf2",
+ 64 => "__extenddfxf2",
+ 128 => "__trunctfxf2",
+ },
+ 128 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhftf2",
+ 32 => "__extendsftf2",
+ 64 => "__extenddftf2",
+ 80 => "__extendxftf2",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ switch (src_bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .intcast => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_int_info = src_ty.intInfo(zcu);
+ const can_be_negative = dst_int_info.signedness == .signed and
+ src_int_info.signedness == .signed;
+ if ((dst_int_info.bits <= 8 and src_int_info.bits <= 8) or
+ (dst_int_info.bits > 8 and dst_int_info.bits <= 16 and
+ src_int_info.bits > 8 and src_int_info.bits <= 16) or
+ (dst_int_info.bits > 16 and dst_int_info.bits <= 32 and
+ src_int_info.bits > 16 and src_int_info.bits <= 32) or
+ (dst_int_info.bits > 32 and dst_int_info.bits <= 64 and
+ src_int_info.bits > 32 and src_int_info.bits <= 64) or
+ (dst_int_info.bits > 64 and src_int_info.bits > 64 and
+ (dst_int_info.bits - 1) / 128 == (src_int_info.bits - 1) / 128))
+ {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_int_info.bits <= 32 and src_int_info.bits <= 64) {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ } else if (dst_int_info.bits <= 64 and src_int_info.bits <= 32) {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (can_be_negative) .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(src_int_info.bits - 1),
+ }) else .orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ } else if (dst_int_info.bits <= 32 and src_int_info.bits <= 128) {
+ assert(src_int_info.bits > 64);
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ try isel.emit(.orr(dst_ra.w(), .wzr, .{ .register = src_lo64_mat.ra.w() }));
+ try src_lo64_mat.finish(isel);
+ } else if (dst_int_info.bits <= 64 and src_int_info.bits <= 128) {
+ assert(dst_int_info.bits > 32 and src_int_info.bits > 64);
+ const src_vi = try isel.use(ty_op.operand);
+
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try dst_vi.value.copy(isel, dst_ty, src_lo64_vi.?);
+ } else if (dst_int_info.bits <= 128 and src_int_info.bits <= 64) {
+ assert(dst_int_info.bits > 64);
+ const src_vi = try isel.use(ty_op.operand);
+
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (src_int_info.bits <= 32) unused_lo64: {
+ const dst_lo64_ra = try dst_lo64_vi.?.defReg(isel) orelse break :unused_lo64;
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (can_be_negative) .sbfm(dst_lo64_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(src_int_info.bits - 1),
+ }) else .orr(dst_lo64_ra.w(), .wzr, .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ } else try dst_lo64_vi.?.copy(isel, src_ty, src_vi);
+
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ const dst_hi64_ra = try dst_hi64_vi.?.defReg(isel);
+ if (dst_hi64_ra) |dst_ra| switch (can_be_negative) {
+ false => try isel.emit(.orr(dst_ra.x(), .xzr, .{ .register = .xzr })),
+ true => {
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(src_int_info.bits - 1),
+ .imms = @intCast(src_int_info.bits - 1),
+ }));
+ try src_mat.finish(isel);
+ },
+ };
+ } else return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .intcast_safe => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_int_info = src_ty.intInfo(zcu);
+ const can_be_negative = dst_int_info.signedness == .signed and
+ src_int_info.signedness == .signed;
+ const panic_id: Zcu.SimplePanicId = panic_id: switch (dst_ty.zigTypeTag(zcu)) {
+ else => unreachable,
+ .int => .integer_out_of_bounds,
+ .@"enum" => {
+ if (!dst_ty.isNonexhaustiveEnum(zcu)) {
+ return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ break :panic_id .invalid_enum_value;
+ },
+ };
+ if (dst_ty.toIntern() == src_ty.toIntern()) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_int_info.bits <= 64 and src_int_info.bits <= 64) {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const dst_active_bits = dst_int_info.bits - @intFromBool(dst_int_info.signedness == .signed);
+ const src_active_bits = src_int_info.bits - @intFromBool(src_int_info.signedness == .signed);
+ if ((dst_int_info.signedness != .unsigned or src_int_info.signedness != .signed) and dst_active_bits >= src_active_bits) {
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (can_be_negative and dst_active_bits > 32 and src_active_bits <= 32)
+ .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(src_int_info.bits - 1),
+ })
+ else switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => .orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }),
+ 33...64 => .orr(dst_ra.x(), .xzr, .{ .register = src_mat.ra.x() }),
+ });
+ try src_mat.finish(isel);
+ } else {
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(panic_id);
+ try isel.emit(.@"b."(
+ .eq,
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ if (can_be_negative) {
+ const src_mat = src_mat: {
+ const dst_lock = isel.lockReg(dst_ra);
+ defer dst_lock.unlock(isel);
+ break :src_mat try src_vi.matReg(isel);
+ };
+ try isel.emit(switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => .subs(.wzr, dst_ra.w(), .{ .register = src_mat.ra.w() }),
+ 33...64 => .subs(.xzr, dst_ra.x(), .{ .register = src_mat.ra.x() }),
+ });
+ try isel.emit(switch (@max(dst_int_info.bits, src_int_info.bits)) {
+ else => unreachable,
+ 1...32 => .sbfm(dst_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(dst_int_info.bits - 1),
+ }),
+ 33...64 => .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_int_info.bits - 1),
+ }),
+ });
+ try src_mat.finish(isel);
+ } else {
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(switch (@min(dst_int_info.bits, src_int_info.bits)) {
+ else => unreachable,
+ 1...32 => .orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }),
+ 33...64 => .orr(dst_ra.x(), .xzr, .{ .register = src_mat.ra.x() }),
+ });
+ const active_bits = @min(dst_active_bits, src_active_bits);
+ try isel.emit(switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => .ands(.wzr, src_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = @intCast(32 - active_bits),
+ .imms = @intCast(32 - active_bits - 1),
+ } }),
+ 33...64 => .ands(.xzr, src_mat.ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = @intCast(64 - active_bits),
+ .imms = @intCast(64 - active_bits - 1),
+ } }),
+ });
+ try src_mat.finish(isel);
+ }
+ }
+ } else return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .trunc => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!dst_ty.isAbiInt(zcu) or !src_ty.isAbiInt(zcu)) return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ const dst_int_info = dst_ty.intInfo(zcu);
+ switch (dst_int_info.bits) {
+ 0 => unreachable,
+ 1...64 => |dst_bits| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_part_it = src_vi.field(src_ty, 0, @min(src_vi.size(isel), 8));
+ const src_part_vi = try src_part_it.only(isel);
+ const src_part_mat = try src_part_vi.?.matReg(isel);
+ try isel.emit(switch (dst_bits) {
+ else => unreachable,
+ 1...31 => |bits| switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.w(), src_part_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.w(), src_part_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ },
+ 32 => .orr(dst_ra.w(), .wzr, .{ .register = src_part_mat.ra.w() }),
+ 33...63 => |bits| switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.x(), src_part_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.x(), src_part_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ },
+ 64 => .orr(dst_ra.x(), .xzr, .{ .register = src_part_mat.ra.x() }),
+ });
+ try src_part_mat.finish(isel);
+ },
+ 65...128 => |dst_bits| switch (src_ty.intInfo(zcu).bits) {
+ 0 => unreachable,
+ 65...128 => {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ if (try dst_hi64_vi.?.defReg(isel)) |dst_hi64_ra| {
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ .unsigned => .ubfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ });
+ try src_hi64_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .optional_payload => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_vi| unused: {
+ defer payload_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const opt_ty = isel.air.typeOf(ty_op.operand, ip);
+ if (opt_ty.optionalReprIsPayload(zcu)) {
+ try payload_vi.value.move(isel, ty_op.operand);
+ break :unused;
+ }
+
+ const opt_vi = try isel.use(ty_op.operand);
+ var payload_part_it = opt_vi.field(opt_ty, 0, payload_vi.value.size(isel));
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_vi.value.copy(isel, ty_op.ty.toType(), payload_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .optional_payload_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_ptr_vi| {
+ defer payload_ptr_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ try payload_ptr_vi.value.move(isel, ty_op.operand);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .optional_payload_ptr_set => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_ptr_vi| {
+ defer payload_ptr_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const opt_ty = isel.air.typeOf(ty_op.operand, ip).childType(zcu);
+ if (!opt_ty.optionalReprIsPayload(zcu)) {
+ const opt_ptr_vi = try isel.use(ty_op.operand);
+ const opt_ptr_mat = try opt_ptr_vi.matReg(isel);
+ const has_value_ra = try isel.allocIntReg();
+ defer isel.freeReg(has_value_ra);
+ try isel.storeReg(
+ has_value_ra,
+ 1,
+ opt_ptr_mat.ra,
+ opt_ty.optionalChild(zcu).abiSize(zcu),
+ );
+ try opt_ptr_mat.finish(isel);
+ try isel.emit(.movz(has_value_ra.w(), 1, .{ .lsl = .@"0" }));
+ }
+ try payload_ptr_vi.value.move(isel, ty_op.operand);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .wrap_optional => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |opt_vi| unused: {
+ defer opt_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ if (ty_op.ty.toType().optionalReprIsPayload(zcu)) {
+ try opt_vi.value.move(isel, ty_op.operand);
+ break :unused;
+ }
+
+ const payload_size = isel.air.typeOf(ty_op.operand, ip).abiSize(zcu);
+ var payload_part_it = opt_vi.value.field(ty_op.ty.toType(), 0, payload_size);
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_part_vi.?.move(isel, ty_op.operand);
+ var has_value_part_it = opt_vi.value.field(ty_op.ty.toType(), payload_size, 1);
+ const has_value_part_vi = try has_value_part_it.only(isel);
+ const has_value_part_ra = try has_value_part_vi.?.defReg(isel) orelse break :unused;
+ try isel.emit(.movz(has_value_part_ra.w(), 1, .{ .lsl = .@"0" }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .unwrap_errunion_payload => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_vi| {
+ defer payload_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = isel.air.typeOf(ty_op.operand, ip);
+
+ const error_union_vi = try isel.use(ty_op.operand);
+ var payload_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionPayloadOffset(ty_op.ty.toType(), zcu),
+ payload_vi.value.size(isel),
+ );
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_vi.value.copy(isel, ty_op.ty.toType(), payload_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .unwrap_errunion_err => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |error_set_vi| {
+ defer error_set_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = isel.air.typeOf(ty_op.operand, ip);
+
+ const error_union_vi = try isel.use(ty_op.operand);
+ var error_set_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionErrorOffset(error_union_ty.errorUnionPayload(zcu), zcu),
+ error_set_vi.value.size(isel),
+ );
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ try error_set_vi.value.copy(isel, ty_op.ty.toType(), error_set_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .unwrap_errunion_payload_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_ptr_vi| unused: {
+ defer payload_ptr_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ switch (codegen.errUnionPayloadOffset(ty_op.ty.toType().childType(zcu), zcu)) {
+ 0 => try payload_ptr_vi.value.move(isel, ty_op.operand),
+ else => |payload_offset| {
+ const payload_ptr_ra = try payload_ptr_vi.value.defReg(isel) orelse break :unused;
+ const error_union_ptr_vi = try isel.use(ty_op.operand);
+ const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
+ const lo12: u12 = @truncate(payload_offset >> 0);
+ const hi12: u12 = @intCast(payload_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ payload_ptr_ra.x(),
+ if (lo12 > 0) payload_ptr_ra.x() else error_union_ptr_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(payload_ptr_ra.x(), error_union_ptr_mat.ra.x(), .{ .immediate = lo12 }));
+ try error_union_ptr_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .unwrap_errunion_err_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |error_ptr_vi| unused: {
+ defer error_ptr_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ switch (codegen.errUnionErrorOffset(
+ isel.air.typeOf(ty_op.operand, ip).childType(zcu).errorUnionPayload(zcu),
+ zcu,
+ )) {
+ 0 => try error_ptr_vi.value.move(isel, ty_op.operand),
+ else => |error_offset| {
+ const error_ptr_ra = try error_ptr_vi.value.defReg(isel) orelse break :unused;
+ const error_union_ptr_vi = try isel.use(ty_op.operand);
+ const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
+ const lo12: u12 = @truncate(error_offset >> 0);
+ const hi12: u12 = @intCast(error_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ error_ptr_ra.x(),
+ if (lo12 > 0) error_ptr_ra.x() else error_union_ptr_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(error_ptr_ra.x(), error_union_ptr_mat.ra.x(), .{ .immediate = lo12 }));
+ try error_union_ptr_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .errunion_payload_ptr_set => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_ptr_vi| unused: {
+ defer payload_ptr_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const payload_ty = ty_op.ty.toType().childType(zcu);
+ const error_union_ty = isel.air.typeOf(ty_op.operand, ip).childType(zcu);
+ const error_set_size = error_union_ty.errorUnionSet(zcu).abiSize(zcu);
+ const error_union_ptr_vi = try isel.use(ty_op.operand);
+ const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
+ if (error_set_size > 0) try isel.storeReg(
+ .zr,
+ error_set_size,
+ error_union_ptr_mat.ra,
+ codegen.errUnionErrorOffset(payload_ty, zcu),
+ );
+ switch (codegen.errUnionPayloadOffset(payload_ty, zcu)) {
+ 0 => {
+ try error_union_ptr_mat.finish(isel);
+ try payload_ptr_vi.value.move(isel, ty_op.operand);
+ },
+ else => |payload_offset| {
+ const payload_ptr_ra = try payload_ptr_vi.value.defReg(isel) orelse break :unused;
+ const lo12: u12 = @truncate(payload_offset >> 0);
+ const hi12: u12 = @intCast(payload_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ payload_ptr_ra.x(),
+ if (lo12 > 0) payload_ptr_ra.x() else error_union_ptr_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(payload_ptr_ra.x(), error_union_ptr_mat.ra.x(), .{ .immediate = lo12 }));
+ try error_union_ptr_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .wrap_errunion_payload => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |error_union_vi| {
+ defer error_union_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = ty_op.ty.toType();
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const error_set_ty: ZigType = .fromInterned(error_union_info.error_set_type);
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const payload_offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const error_set_size = error_set_ty.abiSize(zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+
+ var payload_part_it = error_union_vi.value.field(error_union_ty, payload_offset, payload_size);
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_part_vi.?.move(isel, ty_op.operand);
+ var error_set_part_it = error_union_vi.value.field(error_union_ty, error_set_offset, error_set_size);
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ if (try error_set_part_vi.?.defReg(isel)) |error_set_part_ra| try isel.emit(switch (error_set_size) {
+ else => unreachable,
+ 1...4 => .orr(error_set_part_ra.w(), .wzr, .{ .register = .wzr }),
+ 5...8 => .orr(error_set_part_ra.x(), .xzr, .{ .register = .xzr }),
+ });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .wrap_errunion_err => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |error_union_vi| {
+ defer error_union_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = ty_op.ty.toType();
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const error_set_ty: ZigType = .fromInterned(error_union_info.error_set_type);
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const payload_offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const error_set_size = error_set_ty.abiSize(zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+
+ if (payload_size > 0) {
+ var payload_part_it = error_union_vi.value.field(error_union_ty, payload_offset, payload_size);
+ const payload_part_vi = try payload_part_it.only(isel);
+ if (try payload_part_vi.?.defReg(isel)) |payload_part_ra| try isel.emit(switch (payload_size) {
+ else => unreachable,
+ 1...4 => .orr(payload_part_ra.w(), .wzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ 5...8 => .orr(payload_part_ra.x(), .xzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ });
+ }
+ var error_set_part_it = error_union_vi.value.field(error_union_ty, error_set_offset, error_set_size);
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ try error_set_part_vi.?.move(isel, ty_op.operand);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .struct_field_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
+ switch (codegen.fieldOffset(
+ isel.air.typeOf(extra.struct_operand, ip),
+ ty_pl.ty.toType(),
+ extra.field_index,
+ zcu,
+ )) {
+ 0 => try dst_vi.value.move(isel, extra.struct_operand),
+ else => |field_offset| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(extra.struct_operand);
+ const src_mat = try src_vi.matReg(isel);
+ const lo12: u12 = @truncate(field_offset >> 0);
+ const hi12: u12 = @intCast(field_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ dst_ra.x(),
+ if (lo12 > 0) dst_ra.x() else src_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(dst_ra.x(), src_mat.ra.x(), .{ .immediate = lo12 }));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ switch (codegen.fieldOffset(
+ isel.air.typeOf(ty_op.operand, ip),
+ ty_op.ty.toType(),
+ switch (air_tag) {
+ else => unreachable,
+ .struct_field_ptr_index_0 => 0,
+ .struct_field_ptr_index_1 => 1,
+ .struct_field_ptr_index_2 => 2,
+ .struct_field_ptr_index_3 => 3,
+ },
+ zcu,
+ )) {
+ 0 => try dst_vi.value.move(isel, ty_op.operand),
+ else => |field_offset| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const lo12: u12 = @truncate(field_offset >> 0);
+ const hi12: u12 = @intCast(field_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ dst_ra.x(),
+ if (lo12 > 0) dst_ra.x() else src_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(dst_ra.x(), src_mat.ra.x(), .{ .immediate = lo12 }));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .struct_field_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |field_vi| {
+ defer field_vi.value.deref(isel);
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
+ const agg_ty = isel.air.typeOf(extra.struct_operand, ip);
+ const field_ty = ty_pl.ty.toType();
+ const field_bit_offset, const field_bit_size, const is_packed = switch (agg_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => .{
+ 8 * agg_ty.structFieldOffset(extra.field_index, zcu),
+ 8 * field_ty.abiSize(zcu),
+ false,
+ },
+ .@"packed" => .{
+ if (zcu.typeToPackedStruct(agg_ty)) |loaded_struct|
+ zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index)
+ else
+ 0,
+ field_ty.bitSize(zcu),
+ true,
+ },
+ };
+ if (is_packed) return isel.fail("packed field of {f}", .{
+ isel.fmtType(agg_ty),
+ });
+
+ const agg_vi = try isel.use(extra.struct_operand);
+ var agg_part_it = agg_vi.field(agg_ty, @divExact(field_bit_offset, 8), @divExact(field_bit_size, 8));
+ while (try agg_part_it.next(isel)) |agg_part| {
+ var field_part_it = field_vi.value.field(ty_pl.ty.toType(), agg_part.offset, agg_part.vi.size(isel));
+ const field_part_vi = try field_part_it.only(isel);
+ if (field_part_vi.? == agg_part.vi) continue;
+ var field_subpart_it = field_part_vi.?.parts(isel);
+ const field_part_offset = if (field_subpart_it.only()) |field_subpart_vi|
+ field_subpart_vi.get(isel).offset_from_parent
+ else
+ 0;
+ while (field_subpart_it.next()) |field_subpart_vi| {
+ const field_subpart_ra = try field_subpart_vi.defReg(isel) orelse continue;
+ const field_subpart_offset, const field_subpart_size = field_subpart_vi.position(isel);
+ var agg_subpart_it = agg_part.vi.field(
+ field_ty,
+ agg_part.offset + field_subpart_offset - field_part_offset,
+ field_subpart_size,
+ );
+ const agg_subpart_vi = try agg_subpart_it.only(isel);
+ try agg_subpart_vi.?.liveOut(isel, field_subpart_ra);
+ }
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .set_union_tag => {
+ const bin_op = air.data(air.inst_index).bin_op;
+ const union_ty = isel.air.typeOf(bin_op.lhs, ip).childType(zcu);
+ const union_layout = union_ty.unionGetLayout(zcu);
+ const tag_vi = try isel.use(bin_op.rhs);
+ const union_ptr_vi = try isel.use(bin_op.lhs);
+ const union_ptr_mat = try union_ptr_vi.matReg(isel);
+ try tag_vi.store(isel, isel.air.typeOf(bin_op.rhs, ip), union_ptr_mat.ra, .{
+ .offset = union_layout.tagOffset(),
+ });
+ try union_ptr_mat.finish(isel);
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .get_union_tag => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |tag_vi| {
+ defer tag_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const union_ty = isel.air.typeOf(ty_op.operand, ip);
+ const union_layout = union_ty.unionGetLayout(zcu);
+ const union_vi = try isel.use(ty_op.operand);
+ var tag_part_it = union_vi.field(union_ty, union_layout.tagOffset(), union_layout.tag_size);
+ const tag_part_vi = try tag_part_it.only(isel);
+ try tag_vi.value.copy(isel, ty_op.ty.toType(), tag_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .slice => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |slice_vi| {
+ defer slice_vi.value.deref(isel);
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ var ptr_part_it = slice_vi.value.field(ty_pl.ty.toType(), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ try ptr_part_vi.?.move(isel, bin_op.lhs);
+ var len_part_it = slice_vi.value.field(ty_pl.ty.toType(), 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ try len_part_vi.?.move(isel, bin_op.rhs);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .slice_len => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |len_vi| {
+ defer len_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const slice_vi = try isel.use(ty_op.operand);
+ var len_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ try len_vi.value.copy(isel, ty_op.ty.toType(), len_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .slice_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |ptr_vi| {
+ defer ptr_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const slice_vi = try isel.use(ty_op.operand);
+ var ptr_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ try ptr_vi.value.copy(isel, ty_op.ty.toType(), ptr_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .array_elem_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
+ defer elem_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const array_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const elem_ty = array_ty.childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
+ if (elem_size <= 16 and array_ty.arrayLenIncludingSentinel(zcu) <= Value.max_parts) if (bin_op.rhs.toInterned()) |index_val| {
+ const elem_offset = elem_size * Constant.fromInterned(index_val).toUnsignedInt(zcu);
+ const array_vi = try isel.use(bin_op.lhs);
+ var elem_part_it = array_vi.field(array_ty, elem_offset, elem_size);
+ const elem_part_vi = try elem_part_it.only(isel);
+ try elem_vi.value.copy(isel, elem_ty, elem_part_vi.?);
+ break :unused;
+ };
+ switch (elem_size) {
+ 0 => unreachable,
+ 1, 2, 4, 8 => {
+ const elem_ra = try elem_vi.value.defReg(isel) orelse break :unused;
+ const array_ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(array_ptr_ra);
+ const index_vi = try isel.use(bin_op.rhs);
+ const index_mat = try index_vi.matReg(isel);
+ try isel.emit(switch (elem_size) {
+ else => unreachable,
+ 1 => if (elem_vi.value.isVector(isel)) .ldr(elem_ra.b(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsb(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ .unsigned => .ldrb(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ },
+ 2 => if (elem_vi.value.isVector(isel)) .ldr(elem_ra.h(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsh(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ .unsigned => .ldrh(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ },
+ 4 => .ldr(if (elem_vi.value.isVector(isel)) elem_ra.s() else elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 2 },
+ } }),
+ 8 => .ldr(if (elem_vi.value.isVector(isel)) elem_ra.d() else elem_ra.x(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 3 },
+ } }),
+ 16 => .ldr(elem_ra.q(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 4 },
+ } }),
+ });
+ try index_mat.finish(isel);
+ const array_vi = try isel.use(bin_op.lhs);
+ try array_vi.address(isel, 0, array_ptr_ra);
+ },
+ else => {
+ const ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(ptr_ra);
+ if (!try elem_vi.value.load(isel, elem_ty, ptr_ra, .{})) break :unused;
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(ptr_ra, ptr_ra, .add, elem_size, index_vi);
+ const array_vi = try isel.use(bin_op.lhs);
+ try array_vi.address(isel, 0, ptr_ra);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .slice_elem_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
+ defer elem_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const slice_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const ptr_info = slice_ty.ptrInfo(zcu);
+ const elem_size = elem_vi.value.size(isel);
+ const elem_is_vector = elem_vi.value.isVector(isel);
+ if (switch (elem_size) {
+ 0 => unreachable,
+ 1, 2, 4, 8 => true,
+ 16 => elem_is_vector,
+ else => false,
+ }) {
+ const elem_ra = try elem_vi.value.defReg(isel) orelse break :unused;
+ const slice_vi = try isel.use(bin_op.lhs);
+ const index_vi = try isel.use(bin_op.rhs);
+ var ptr_part_it = slice_vi.field(slice_ty, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const base_mat = try ptr_part_vi.?.matReg(isel);
+ const index_mat = try index_vi.matReg(isel);
+ try isel.emit(switch (elem_size) {
+ else => unreachable,
+ 1 => if (elem_is_vector) .ldr(elem_ra.b(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ .unsigned => .ldrb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ },
+ 2 => if (elem_is_vector) .ldr(elem_ra.h(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ .unsigned => .ldrh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ },
+ 4 => .ldr(if (elem_is_vector) elem_ra.s() else elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 2 },
+ } }),
+ 8 => .ldr(if (elem_is_vector) elem_ra.d() else elem_ra.x(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 3 },
+ } }),
+ 16 => if (elem_is_vector) .ldr(elem_ra.q(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 4 },
+ } }) else unreachable,
+ });
+ try index_mat.finish(isel);
+ try base_mat.finish(isel);
+ } else {
+ const elem_ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(elem_ptr_ra);
+ if (!try elem_vi.value.load(isel, slice_ty.elemType2(zcu), elem_ptr_ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ })) break :unused;
+ const slice_vi = try isel.use(bin_op.lhs);
+ var ptr_part_it = slice_vi.field(slice_ty, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const ptr_part_mat = try ptr_part_vi.?.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, ptr_part_mat.ra, .add, elem_size, index_vi);
+ try ptr_part_mat.finish(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .slice_elem_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_ptr_vi| unused: {
+ defer elem_ptr_vi.value.deref(isel);
+ const elem_ptr_ra = try elem_ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ const elem_size = ty_pl.ty.toType().childType(zcu).abiSize(zcu);
+
+ const slice_vi = try isel.use(bin_op.lhs);
+ var ptr_part_it = slice_vi.field(isel.air.typeOf(bin_op.lhs, ip), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const ptr_part_mat = try ptr_part_vi.?.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, ptr_part_mat.ra, .add, elem_size, index_vi);
+ try ptr_part_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ptr_elem_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
+ defer elem_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ptr_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ const elem_size = elem_vi.value.size(isel);
+ const elem_is_vector = elem_vi.value.isVector(isel);
+ if (switch (elem_size) {
+ 0 => unreachable,
+ 1, 2, 4, 8 => true,
+ 16 => elem_is_vector,
+ else => false,
+ }) {
+ const elem_ra = try elem_vi.value.defReg(isel) orelse break :unused;
+ const base_vi = try isel.use(bin_op.lhs);
+ const index_vi = try isel.use(bin_op.rhs);
+ const base_mat = try base_vi.matReg(isel);
+ const index_mat = try index_vi.matReg(isel);
+ try isel.emit(switch (elem_size) {
+ else => unreachable,
+ 1 => if (elem_is_vector) .ldr(elem_ra.b(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ .unsigned => .ldrb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ },
+ 2 => if (elem_is_vector) .ldr(elem_ra.h(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ .unsigned => .ldrh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ },
+ 4 => .ldr(if (elem_is_vector) elem_ra.s() else elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 2 },
+ } }),
+ 8 => .ldr(if (elem_is_vector) elem_ra.d() else elem_ra.x(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 3 },
+ } }),
+ 16 => if (elem_is_vector) .ldr(elem_ra.q(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 4 },
+ } }) else unreachable,
+ });
+ try index_mat.finish(isel);
+ try base_mat.finish(isel);
+ } else {
+ const elem_ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(elem_ptr_ra);
+ if (!try elem_vi.value.load(isel, ptr_ty.elemType2(zcu), elem_ptr_ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ })) break :unused;
+ const base_vi = try isel.use(bin_op.lhs);
+ const base_mat = try base_vi.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, base_mat.ra, .add, elem_size, index_vi);
+ try base_mat.finish(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ptr_elem_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_ptr_vi| unused: {
+ defer elem_ptr_vi.value.deref(isel);
+ const elem_ptr_ra = try elem_ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ const elem_size = ty_pl.ty.toType().childType(zcu).abiSize(zcu);
+
+ const base_vi = try isel.use(bin_op.lhs);
+ const base_mat = try base_vi.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, base_mat.ra, .add, elem_size, index_vi);
+ try base_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .array_to_slice => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |slice_vi| {
+ defer slice_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ var ptr_part_it = slice_vi.value.field(ty_op.ty.toType(), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ try ptr_part_vi.?.move(isel, ty_op.operand);
+ var len_part_it = slice_vi.value.field(ty_op.ty.toType(), 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ if (try len_part_vi.?.defReg(isel)) |len_ra| try isel.movImmediate(
+ len_ra.x(),
+ isel.air.typeOf(ty_op.operand, ip).childType(zcu).arrayLen(zcu),
+ );
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .int_from_float, .int_from_float_optimized => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!dst_ty.isAbiInt(zcu)) return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_bits = src_ty.floatBits(isel.target);
+ switch (@max(dst_int_info.bits, src_bits)) {
+ 0 => unreachable,
+ 1...64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (src_bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const src_ra = if (need_fcvt) try isel.allocVecReg() else src_mat.ra;
+ defer if (need_fcvt) isel.freeReg(src_ra);
+ const dst_reg = switch (dst_int_info.bits) {
+ else => unreachable,
+ 1...32 => dst_ra.w(),
+ 33...64 => dst_ra.x(),
+ };
+ const src_reg = switch (src_bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) src_ra.s() else src_ra.h(),
+ 32 => src_ra.s(),
+ 64 => src_ra.d(),
+ };
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .fcvtzs(dst_reg, src_reg),
+ .unsigned => .fcvtzu(dst_reg, src_reg),
+ });
+ if (need_fcvt) try isel.emit(.fcvt(src_reg, src_mat.ra.h()));
+ try src_mat.finish(isel);
+ },
+ 65...128 => {
+ try call.prepareReturn(isel);
+ switch (dst_int_info.bits) {
+ else => unreachable,
+ 1...64 => try call.returnLiveIn(isel, dst_vi.value, .r0),
+ 65...128 => {
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ try call.returnLiveIn(isel, dst_hi64_vi.?, .r1);
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ try call.returnLiveIn(isel, dst_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (dst_int_info.bits) {
+ else => unreachable,
+ 1...32 => switch (dst_int_info.signedness) {
+ .signed => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixhfsi",
+ 32 => "__fixsfsi",
+ 64 => "__fixdfsi",
+ 80 => "__fixxfsi",
+ 128 => "__fixtfsi",
+ },
+ .unsigned => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixunshfsi",
+ 32 => "__fixunssfsi",
+ 64 => "__fixunsdfsi",
+ 80 => "__fixunsxfsi",
+ 128 => "__fixunstfsi",
+ },
+ },
+ 33...64 => switch (dst_int_info.signedness) {
+ .signed => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixhfdi",
+ 32 => "__fixsfdi",
+ 64 => "__fixdfdi",
+ 80 => "__fixxfdi",
+ 128 => "__fixtfdi",
+ },
+ .unsigned => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixunshfdi",
+ 32 => "__fixunssfdi",
+ 64 => "__fixunsdfdi",
+ 80 => "__fixunsxfdi",
+ 128 => "__fixunstfdi",
+ },
+ },
+ 65...128 => switch (dst_int_info.signedness) {
+ .signed => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixhfti",
+ 32 => "__fixsfti",
+ 64 => "__fixdfti",
+ 80 => "__fixxfti",
+ 128 => "__fixtfti",
+ },
+ .unsigned => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixunshfti",
+ 32 => "__fixunssfti",
+ 64 => "__fixunsdfti",
+ 80 => "__fixunsxfti",
+ 128 => "__fixunstfti",
+ },
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ switch (src_bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .float_from_int => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const dst_bits = dst_ty.floatBits(isel.target);
+ if (!src_ty.isAbiInt(zcu)) return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ const src_int_info = src_ty.intInfo(zcu);
+ switch (@max(dst_bits, src_int_info.bits)) {
+ 0 => unreachable,
+ 1...64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (dst_bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(dst_ra.h(), dst_ra.s()));
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const dst_reg = switch (dst_bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) dst_ra.s() else dst_ra.h(),
+ 32 => dst_ra.s(),
+ 64 => dst_ra.d(),
+ };
+ const src_reg = switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => src_mat.ra.w(),
+ 33...64 => src_mat.ra.x(),
+ };
+ try isel.emit(switch (src_int_info.signedness) {
+ .signed => .scvtf(dst_reg, src_reg),
+ .unsigned => .ucvtf(dst_reg, src_reg),
+ });
+ try src_mat.finish(isel);
+ },
+ 65...128 => {
+ try call.prepareReturn(isel);
+ switch (dst_bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, dst_vi.value, .v0),
+ 80 => {
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ try call.returnLiveIn(isel, dst_hi16_vi.?, .r1);
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ try call.returnLiveIn(isel, dst_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => switch (src_int_info.signedness) {
+ .signed => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatsihf",
+ 32 => "__floatsisf",
+ 64 => "__floatsidf",
+ 80 => "__floatsixf",
+ 128 => "__floatsitf",
+ },
+ .unsigned => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatunsihf",
+ 32 => "__floatunsisf",
+ 64 => "__floatunsidf",
+ 80 => "__floatunsixf",
+ 128 => "__floatunsitf",
+ },
+ },
+ 33...64 => switch (src_int_info.signedness) {
+ .signed => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatdihf",
+ 32 => "__floatdisf",
+ 64 => "__floatdidf",
+ 80 => "__floatdixf",
+ 128 => "__floatditf",
+ },
+ .unsigned => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatundihf",
+ 32 => "__floatundisf",
+ 64 => "__floatundidf",
+ 80 => "__floatundixf",
+ 128 => "__floatunditf",
+ },
+ },
+ 65...128 => switch (src_int_info.signedness) {
+ .signed => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floattihf",
+ 32 => "__floattisf",
+ 64 => "__floattidf",
+ 80 => "__floattixf",
+ 128 => "__floattitf",
+ },
+ .unsigned => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatuntihf",
+ 32 => "__floatuntisf",
+ 64 => "__floatuntidf",
+ 80 => "__floatuntixf",
+ 128 => "__floatuntitf",
+ },
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ switch (src_int_info.bits) {
+ else => unreachable,
+ 1...64 => try call.paramLiveOut(isel, src_vi, .r0),
+ 65...128 => {
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ try call.paramLiveOut(isel, src_hi64_vi.?, .r1);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .memset, .memset_safe => |air_tag| {
+ const bin_op = air.data(air.inst_index).bin_op;
+ const dst_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const dst_info = dst_ty.ptrInfo(zcu);
+ const fill_byte: union(enum) { constant: u8, value: Air.Inst.Ref } = fill_byte: {
+ if (bin_op.rhs.toInterned()) |fill_val| {
+ if (ip.isUndef(fill_val)) switch (air_tag) {
+ else => unreachable,
+ .memset => break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag,
+ .memset_safe => break :fill_byte .{ .constant = 0xaa },
+ };
+ if (try isel.hasRepeatedByteRepr(.fromInterned(fill_val))) |fill_byte|
+ break :fill_byte .{ .constant = fill_byte };
+ }
+ switch (dst_ty.elemType2(zcu).abiSize(zcu)) {
+ 0 => unreachable,
+ 1 => break :fill_byte .{ .value = bin_op.rhs },
+ 2, 4, 8 => |size| {
+ const dst_vi = try isel.use(bin_op.lhs);
+ const ptr_ra = try isel.allocIntReg();
+ const fill_vi = try isel.use(bin_op.rhs);
+ const fill_mat = try fill_vi.matReg(isel);
+ const len_mat: Value.Materialize = len_mat: switch (dst_info.flags.size) {
+ .one => .{ .vi = undefined, .ra = try isel.allocIntReg() },
+ .many => unreachable,
+ .slice => {
+ var dst_len_it = dst_vi.field(dst_ty, 8, 8);
+ const dst_len_vi = try dst_len_it.only(isel);
+ break :len_mat try dst_len_vi.?.matReg(isel);
+ },
+ .c => unreachable,
+ };
+
+ const skip_label = isel.instructions.items.len;
+ _ = try isel.instructions.addOne(gpa);
+ try isel.emit(.sub(len_mat.ra.x(), len_mat.ra.x(), .{ .immediate = 1 }));
+ try isel.emit(switch (size) {
+ else => unreachable,
+ 2 => .strh(fill_mat.ra.w(), .{ .post_index = .{ .base = ptr_ra.x(), .index = 2 } }),
+ 4 => .str(fill_mat.ra.w(), .{ .post_index = .{ .base = ptr_ra.x(), .index = 4 } }),
+ 8 => .str(fill_mat.ra.x(), .{ .post_index = .{ .base = ptr_ra.x(), .index = 8 } }),
+ });
+ isel.instructions.items[skip_label] = .cbnz(
+ len_mat.ra.x(),
+ -@as(i21, @intCast((isel.instructions.items.len - 1 - skip_label) << 2)),
+ );
+ switch (dst_info.flags.size) {
+ .one => {
+ const len_imm = ZigType.fromInterned(dst_info.child).arrayLen(zcu);
+ assert(len_imm > 0);
+ try isel.movImmediate(len_mat.ra.x(), len_imm);
+ isel.freeReg(len_mat.ra);
+ try fill_mat.finish(isel);
+ isel.freeReg(ptr_ra);
+ try dst_vi.liveOut(isel, ptr_ra);
+ },
+ .many => unreachable,
+ .slice => {
+ try isel.emit(.cbz(
+ len_mat.ra.x(),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try len_mat.finish(isel);
+ try fill_mat.finish(isel);
+ isel.freeReg(ptr_ra);
+ var dst_ptr_it = dst_vi.field(dst_ty, 0, 8);
+ const dst_ptr_vi = try dst_ptr_it.only(isel);
+ try dst_ptr_vi.?.liveOut(isel, ptr_ra);
+ },
+ .c => unreachable,
+ }
+
+ break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty) }),
+ }
+ };
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = "memset",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const dst_vi = try isel.use(bin_op.lhs);
+ switch (dst_info.flags.size) {
+ .one => {
+ try isel.movImmediate(.x2, ZigType.fromInterned(dst_info.child).abiSize(zcu));
+ switch (fill_byte) {
+ .constant => |byte| try isel.movImmediate(.w1, byte),
+ .value => |byte| try call.paramLiveOut(isel, try isel.use(byte), .r1),
+ }
+ try call.paramLiveOut(isel, dst_vi, .r0);
+ },
+ .many => unreachable,
+ .slice => {
+ var dst_ptr_it = dst_vi.field(dst_ty, 0, 8);
+ const dst_ptr_vi = try dst_ptr_it.only(isel);
+ var dst_len_it = dst_vi.field(dst_ty, 8, 8);
+ const dst_len_vi = try dst_len_it.only(isel);
+ try isel.elemPtr(.r2, .zr, .add, ZigType.fromInterned(dst_info.child).abiSize(zcu), dst_len_vi.?);
+ switch (fill_byte) {
+ .constant => |byte| try isel.movImmediate(.w1, byte),
+ .value => |byte| try call.paramLiveOut(isel, try isel.use(byte), .r1),
+ }
+ try call.paramLiveOut(isel, dst_ptr_vi.?, .r0);
+ },
+ .c => unreachable,
+ }
+ try call.finishParams(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .memcpy, .memmove => |air_tag| {
+ const bin_op = air.data(air.inst_index).bin_op;
+ const dst_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const dst_info = dst_ty.ptrInfo(zcu);
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = @tagName(air_tag),
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ switch (dst_info.flags.size) {
+ .one => {
+ const dst_vi = try isel.use(bin_op.lhs);
+ const src_vi = try isel.use(bin_op.rhs);
+ try isel.movImmediate(.x2, ZigType.fromInterned(dst_info.child).abiSize(zcu));
+ try call.paramLiveOut(isel, src_vi, .r1);
+ try call.paramLiveOut(isel, dst_vi, .r0);
+ },
+ .many => unreachable,
+ .slice => {
+ const dst_vi = try isel.use(bin_op.lhs);
+ var dst_ptr_it = dst_vi.field(dst_ty, 0, 8);
+ const dst_ptr_vi = try dst_ptr_it.only(isel);
+ var dst_len_it = dst_vi.field(dst_ty, 8, 8);
+ const dst_len_vi = try dst_len_it.only(isel);
+ const src_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(.r2, .zr, .add, ZigType.fromInterned(dst_info.child).abiSize(zcu), dst_len_vi.?);
+ try call.paramLiveOut(isel, src_vi, .r1);
+ try call.paramLiveOut(isel, dst_ptr_vi.?, .r0);
+ },
+ .c => unreachable,
+ }
+ try call.finishParams(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .atomic_load => {
+ const atomic_load = air.data(air.inst_index).atomic_load;
+ const ptr_ty = isel.air.typeOf(atomic_load.ptr, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (atomic_load.order != .unordered) return isel.fail("ordered atomic load", .{});
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed atomic load", .{});
+
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| {
+ defer dst_vi.value.deref(isel);
+ var ptr_mat: ?Value.Materialize = null;
+ var dst_part_it = dst_vi.value.parts(isel);
+ while (dst_part_it.next()) |dst_part_vi| {
+ const dst_ra = try dst_part_vi.defReg(isel) orelse continue;
+ if (ptr_mat == null) {
+ const ptr_vi = try isel.use(atomic_load.ptr);
+ ptr_mat = try ptr_vi.matReg(isel);
+ }
+ try isel.emit(switch (dst_part_vi.size(isel)) {
+ else => |size| return isel.fail("bad atomic load size of {d} from {f}", .{
+ size, isel.fmtType(ptr_ty),
+ }),
+ 1 => switch (dst_part_vi.signedness(isel)) {
+ .signed => .ldrsb(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ .unsigned => .ldrb(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ },
+ 2 => switch (dst_part_vi.signedness(isel)) {
+ .signed => .ldrsh(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ .unsigned => .ldrh(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ },
+ 4 => .ldr(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ 8 => .ldr(dst_ra.x(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ });
+ }
+ if (ptr_mat) |mat| try mat.finish(isel);
+ } else if (ptr_info.flags.is_volatile) return isel.fail("volatile atomic load", .{});
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .error_name => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |name_vi| unused: {
+ defer name_vi.value.deref(isel);
+ var ptr_part_it = name_vi.value.field(.slice_const_u8_sentinel_0, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const ptr_part_ra = try ptr_part_vi.?.defReg(isel);
+ var len_part_it = name_vi.value.field(.slice_const_u8_sentinel_0, 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ const len_part_ra = try len_part_vi.?.defReg(isel);
+ if (ptr_part_ra == null and len_part_ra == null) break :unused;
+
+ const un_op = air.data(air.inst_index).un_op;
+ const error_vi = try isel.use(un_op);
+ const error_mat = try error_vi.matReg(isel);
+ const ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(ptr_ra);
+ const start_ra, const end_ra = range_ras: {
+ const name_lock: RegLock = if (len_part_ra != null) if (ptr_part_ra) |name_ptr_ra|
+ isel.tryLockReg(name_ptr_ra)
+ else
+ .empty else .empty;
+ defer name_lock.unlock(isel);
+ break :range_ras .{ try isel.allocIntReg(), try isel.allocIntReg() };
+ };
+ defer {
+ isel.freeReg(start_ra);
+ isel.freeReg(end_ra);
+ }
+ if (len_part_ra) |name_len_ra| try isel.emit(.sub(
+ name_len_ra.w(),
+ end_ra.w(),
+ .{ .register = start_ra.w() },
+ ));
+ if (ptr_part_ra) |name_ptr_ra| try isel.emit(.add(
+ name_ptr_ra.x(),
+ ptr_ra.x(),
+ .{ .extended_register = .{
+ .register = start_ra.w(),
+ .extend = .{ .uxtw = 0 },
+ } },
+ ));
+ if (len_part_ra) |_| try isel.emit(.sub(end_ra.w(), end_ra.w(), .{ .immediate = 1 }));
+ try isel.emit(.ldp(start_ra.w(), end_ra.w(), .{ .base = start_ra.x() }));
+ try isel.emit(.add(start_ra.x(), ptr_ra.x(), .{ .extended_register = .{
+ .register = error_mat.ra.w(),
+ .extend = switch (zcu.errorSetBits()) {
+ else => unreachable,
+ 1...8 => .{ .uxtb = 2 },
+ 9...16 => .{ .uxth = 2 },
+ 17...32 => .{ .uxtw = 2 },
+ },
+ } }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.add(ptr_ra.x(), ptr_ra.x(), .{ .immediate = 0 }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adrp(ptr_ra.x(), 0));
+ try error_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .aggregate_init => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |agg_vi| {
+ defer agg_vi.value.deref(isel);
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const agg_ty = ty_pl.ty.toType();
+ switch (ip.indexToKey(agg_ty.toIntern())) {
+ .array_type => |array_type| {
+ const elems: []const Air.Inst.Ref =
+ @ptrCast(isel.air.extra.items[ty_pl.payload..][0..@intCast(array_type.len)]);
+ var elem_offset: u64 = 0;
+ const elem_size = ZigType.fromInterned(array_type.child).abiSize(zcu);
+ for (elems) |elem| {
+ var agg_part_it = agg_vi.value.field(agg_ty, elem_offset, elem_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ try agg_part_vi.?.move(isel, elem);
+ elem_offset += elem_size;
+ }
+ switch (array_type.sentinel) {
+ .none => {},
+ else => |sentinel| {
+ var agg_part_it = agg_vi.value.field(agg_ty, elem_offset, elem_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ try agg_part_vi.?.move(isel, .fromIntern(sentinel));
+ },
+ }
+ },
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(agg_ty.toIntern());
+ const elems: []const Air.Inst.Ref =
+ @ptrCast(isel.air.extra.items[ty_pl.payload..][0..loaded_struct.field_types.len]);
+ var field_offset: u64 = 0;
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ field_offset = field_ty.structFieldAlignment(
+ loaded_struct.fieldAlign(ip, field_index),
+ loaded_struct.layout,
+ zcu,
+ ).forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ var agg_part_it = agg_vi.value.field(agg_ty, field_offset, field_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ try agg_part_vi.?.move(isel, elems[field_index]);
+ field_offset += field_size;
+ }
+ assert(loaded_struct.flagsUnordered(ip).alignment.forward(field_offset) == agg_vi.value.size(isel));
+ },
+ .tuple_type => |tuple_type| {
+ const elems: []const Air.Inst.Ref =
+ @ptrCast(isel.air.extra.items[ty_pl.payload..][0..tuple_type.types.len]);
+ var tuple_align: InternPool.Alignment = .@"1";
+ var field_offset: u64 = 0;
+ for (
+ tuple_type.types.get(ip),
+ tuple_type.values.get(ip),
+ elems,
+ ) |field_ty_index, field_val, elem| {
+ if (field_val != .none) continue;
+ const field_ty: ZigType = .fromInterned(field_ty_index);
+ const field_align = field_ty.abiAlignment(zcu);
+ tuple_align = tuple_align.maxStrict(field_align);
+ field_offset = field_align.forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ var agg_part_it = agg_vi.value.field(agg_ty, field_offset, field_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ try agg_part_vi.?.move(isel, elem);
+ field_offset += field_size;
+ }
+ assert(tuple_align.forward(field_offset) == agg_vi.value.size(isel));
+ },
+ else => return isel.fail("aggregate init {f}", .{isel.fmtType(agg_ty)}),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .union_init => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |un_vi| unused: {
+ defer un_vi.value.deref(isel);
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.UnionInit, ty_pl.payload).data;
+ const un_ty = ty_pl.ty.toType();
+ if (un_ty.containerLayout(zcu) != .@"extern") return isel.fail("bad union init {f}", .{isel.fmtType(un_ty)});
+
+ try un_vi.value.defAddr(isel, un_ty, null, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const init_vi = try isel.use(extra.init);
+ try isel.movImmediate(.x2, init_vi.size(isel));
+ try call.paramAddress(isel, init_vi, .r1);
+ try call.paramAddress(isel, un_vi.value, .r0);
+ try call.finishParams(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .prefetch => {
+ const prefetch = air.data(air.inst_index).prefetch;
+ if (!(prefetch.rw == .write and prefetch.cache == .instruction)) {
+ const maybe_slice_ty = isel.air.typeOf(prefetch.ptr, ip);
+ const maybe_slice_vi = try isel.use(prefetch.ptr);
+ const ptr_vi = if (maybe_slice_ty.isSlice(zcu)) ptr_vi: {
+ var ptr_part_it = maybe_slice_vi.field(maybe_slice_ty, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ break :ptr_vi ptr_part_vi.?;
+ } else maybe_slice_vi;
+ const ptr_mat = try ptr_vi.matReg(isel);
+ try isel.emit(.prfm(.{
+ .policy = switch (prefetch.locality) {
+ 1, 2, 3 => .keep,
+ 0 => .strm,
+ },
+ .target = switch (prefetch.locality) {
+ 0, 3 => .l1,
+ 2 => .l2,
+ 1 => .l3,
+ },
+ .type = switch (prefetch.rw) {
+ .read => switch (prefetch.cache) {
+ .data => .pld,
+ .instruction => .pli,
+ },
+ .write => switch (prefetch.cache) {
+ .data => .pst,
+ .instruction => unreachable,
+ },
+ },
+ }, .{ .base = ptr_mat.ra.x() }));
+ try ptr_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .mul_add => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const pl_op = air.data(air.inst_index).pl_op;
+ const bin_op = isel.air.extraData(Air.Bin, pl_op.payload).data;
+ const ty = isel.air.typeOf(pl_op.operand, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const addend_vi = try isel.use(pl_op.operand);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const addend_mat = try addend_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ const addend_ra = if (need_fcvt) try isel.allocVecReg() else addend_mat.ra;
+ defer if (need_fcvt) isel.freeReg(addend_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fmadd(res_ra.h(), lhs_ra.h(), rhs_ra.h(), addend_ra.h()),
+ 32 => .fmadd(res_ra.s(), lhs_ra.s(), rhs_ra.s(), addend_ra.s()),
+ 64 => .fmadd(res_ra.d(), lhs_ra.d(), rhs_ra.d(), addend_ra.d()),
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(addend_ra.s(), addend_mat.ra.h()));
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try addend_mat.finish(isel);
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .name = switch (bits) {
+ else => unreachable,
+ 16 => "__fmah",
+ 32 => "fmaf",
+ 64 => "fma",
+ 80 => "__fmax",
+ 128 => "fmaq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const addend_vi = try isel.use(pl_op.operand);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, addend_vi, .v2);
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var addend_hi16_it = addend_vi.field(ty, 8, 8);
+ const addend_hi16_vi = try addend_hi16_it.only(isel);
+ try call.paramLiveOut(isel, addend_hi16_vi.?, .r5);
+ var addend_lo64_it = addend_vi.field(ty, 0, 8);
+ const addend_lo64_vi = try addend_lo64_it.only(isel);
+ try call.paramLiveOut(isel, addend_lo64_vi.?, .r4);
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .field_parent_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+ switch (codegen.fieldOffset(
+ ty_pl.ty.toType(),
+ isel.air.typeOf(extra.field_ptr, ip),
+ extra.field_index,
+ zcu,
+ )) {
+ 0 => try dst_vi.value.move(isel, extra.field_ptr),
+ else => |field_offset| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(extra.field_ptr);
+ const src_mat = try src_vi.matReg(isel);
+ const lo12: u12 = @truncate(field_offset >> 0);
+ const hi12: u12 = @intCast(field_offset >> 12);
+ if (hi12 > 0) try isel.emit(.sub(
+ dst_ra.x(),
+ if (lo12 > 0) dst_ra.x() else src_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.sub(dst_ra.x(), src_mat.ra.x(), .{ .immediate = lo12 }));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .wasm_memory_size, .wasm_memory_grow => unreachable,
+ .cmp_lt_errors_len => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |is_vi| unused: {
+ defer is_vi.value.deref(isel);
+ const is_ra = try is_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.csinc(is_ra.w(), .wzr, .wzr, .invert(.ls)));
+
+ const un_op = air.data(air.inst_index).un_op;
+ const error_vi = try isel.use(un_op);
+ const error_mat = try error_vi.matReg(isel);
+ const ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(ptr_ra);
+ try isel.emit(.subs(.wzr, error_mat.ra.w(), .{ .register = ptr_ra.w() }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.ldr(ptr_ra.w(), .{ .base = ptr_ra.x() }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adrp(ptr_ra.x(), 0));
+ try error_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .runtime_nav_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |ptr_vi| unused: {
+ defer ptr_vi.value.deref(isel);
+ const ptr_ra = try ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty_nav = air.data(air.inst_index).ty_nav;
+ if (ZigType.fromInterned(ip.getNav(ty_nav.nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) {
+ false => {
+ try isel.nav_relocs.append(gpa, .{
+ .nav = ty_nav.nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adr(ptr_ra.x(), 0));
+ },
+ true => {
+ try isel.nav_relocs.append(gpa, .{
+ .nav = ty_nav.nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.add(ptr_ra.x(), ptr_ra.x(), .{ .immediate = 0 }));
+ try isel.nav_relocs.append(gpa, .{
+ .nav = ty_nav.nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adrp(ptr_ra.x(), 0));
+ },
+ } else try isel.movImmediate(ptr_ra.x(), isel.pt.navAlignment(ty_nav.nav).forward(0xaaaaaaaaaaaaaaaa));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .c_va_arg => {
+ const maybe_arg_vi = isel.live_values.fetchRemove(air.inst_index);
+ defer if (maybe_arg_vi) |arg_vi| arg_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ var param_it: CallAbiIterator = .init;
+ const param_vi = try param_it.param(isel, ty);
+ defer param_vi.?.deref(isel);
+ const passed_vi = switch (param_vi.?.parent(isel)) {
+ .unallocated => param_vi.?,
+ .stack_slot, .value, .constant => unreachable,
+ .address => |address_vi| address_vi,
+ };
+ const passed_size: u5 = @intCast(passed_vi.alignment(isel).forward(passed_vi.size(isel)));
+ const passed_is_vector = passed_vi.isVector(isel);
+
+ const va_list_ptr_vi = try isel.use(ty_op.operand);
+ const va_list_ptr_mat = try va_list_ptr_vi.matReg(isel);
+ const offs_ra = try isel.allocIntReg();
+ defer isel.freeReg(offs_ra);
+ const stack_ra = try isel.allocIntReg();
+ defer isel.freeReg(stack_ra);
+
+ var part_vis: [2]Value.Index = undefined;
+ var arg_part_ras: [2]?Register.Alias = @splat(null);
+ const parts_len = parts_len: {
+ var parts_len: u2 = 0;
+ var part_it = passed_vi.parts(isel);
+ while (part_it.next()) |part_vi| : (parts_len += 1) {
+ part_vis[parts_len] = part_vi;
+ const arg_vi = maybe_arg_vi orelse continue;
+ const part_offset, const part_size = part_vi.position(isel);
+ var arg_part_it = arg_vi.value.field(ty, part_offset, part_size);
+ const arg_part_vi = try arg_part_it.only(isel);
+ arg_part_ras[parts_len] = try arg_part_vi.?.defReg(isel);
+ }
+ break :parts_len parts_len;
+ };
+
+ const done_label = isel.instructions.items.len;
+ try isel.emit(.str(stack_ra.x(), .{ .unsigned_offset = .{
+ .base = va_list_ptr_mat.ra.x(),
+ .offset = 0,
+ } }));
+ try isel.emit(switch (parts_len) {
+ else => unreachable,
+ 1 => if (arg_part_ras[0]) |arg_part_ra| switch (part_vis[0].size(isel)) {
+ else => unreachable,
+ 1 => if (arg_part_ra.isVector()) .ldr(arg_part_ra.b(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }) else switch (part_vis[0].signedness(isel)) {
+ .signed => .ldrsb(arg_part_ra.w(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }),
+ .unsigned => .ldrb(arg_part_ra.w(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }),
+ },
+ 2 => if (arg_part_ra.isVector()) .ldr(arg_part_ra.h(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }) else switch (part_vis[0].signedness(isel)) {
+ .signed => .ldrsh(arg_part_ra.w(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }),
+ .unsigned => .ldrh(arg_part_ra.w(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }),
+ },
+ 4 => .ldr(if (arg_part_ra.isVector()) arg_part_ra.s() else arg_part_ra.w(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }),
+ 8 => .ldr(if (arg_part_ra.isVector()) arg_part_ra.d() else arg_part_ra.x(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }),
+ 16 => .ldr(arg_part_ra.q(), .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } }),
+ } else .add(stack_ra.x(), stack_ra.x(), .{ .immediate = passed_size }),
+ 2 => if (arg_part_ras[0] != null or arg_part_ras[1] != null) .ldp(
+ @as(Register.Alias, arg_part_ras[0] orelse .zr).x(),
+ @as(Register.Alias, arg_part_ras[1] orelse .zr).x(),
+ .{ .post_index = .{
+ .base = stack_ra.x(),
+ .index = passed_size,
+ } },
+ ) else .add(stack_ra.x(), stack_ra.x(), .{ .immediate = passed_size }),
+ });
+ try isel.emit(.ldr(stack_ra.x(), .{ .unsigned_offset = .{
+ .base = va_list_ptr_mat.ra.x(),
+ .offset = 0,
+ } }));
+ switch (isel.va_list) {
+ .other => {},
+ .sysv => {
+ const stack_label = isel.instructions.items.len;
+ try isel.emit(.b(
+ @intCast((isel.instructions.items.len + 1 - done_label) << 2),
+ ));
+ switch (parts_len) {
+ else => unreachable,
+ 1 => if (arg_part_ras[0]) |arg_part_ra| try isel.emit(switch (part_vis[0].size(isel)) {
+ else => unreachable,
+ 1 => if (arg_part_ra.isVector()) .ldr(arg_part_ra.b(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }) else switch (part_vis[0].signedness(isel)) {
+ .signed => .ldrsb(arg_part_ra.w(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }),
+ .unsigned => .ldrb(arg_part_ra.w(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }),
+ },
+ 2 => if (arg_part_ra.isVector()) .ldr(arg_part_ra.h(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }) else switch (part_vis[0].signedness(isel)) {
+ .signed => .ldrsh(arg_part_ra.w(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }),
+ .unsigned => .ldrh(arg_part_ra.w(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }),
+ },
+ 4 => .ldr(if (arg_part_ra.isVector()) arg_part_ra.s() else arg_part_ra.w(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }),
+ 8 => .ldr(if (arg_part_ra.isVector()) arg_part_ra.d() else arg_part_ra.x(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }),
+ 16 => .ldr(arg_part_ra.q(), .{ .extended_register = .{
+ .base = stack_ra.x(),
+ .index = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }),
+ }),
+ 2 => if (arg_part_ras[0] != null or arg_part_ras[1] != null) {
+ try isel.emit(.ldp(
+ @as(Register.Alias, arg_part_ras[0] orelse .zr).x(),
+ @as(Register.Alias, arg_part_ras[1] orelse .zr).x(),
+ .{ .base = stack_ra.x() },
+ ));
+ try isel.emit(.add(stack_ra.x(), stack_ra.x(), .{ .extended_register = .{
+ .register = offs_ra.w(),
+ .extend = .{ .sxtw = 0 },
+ } }));
+ },
+ }
+ try isel.emit(.ldr(stack_ra.x(), .{ .unsigned_offset = .{
+ .base = va_list_ptr_mat.ra.x(),
+ .offset = if (passed_is_vector) 16 else 8,
+ } }));
+ try isel.emit(.@"b."(
+ .gt,
+ @intCast((isel.instructions.items.len + 1 - stack_label) << 2),
+ ));
+ try isel.emit(.str(stack_ra.w(), .{ .unsigned_offset = .{
+ .base = va_list_ptr_mat.ra.x(),
+ .offset = if (passed_is_vector) 28 else 24,
+ } }));
+ try isel.emit(.adds(stack_ra.w(), offs_ra.w(), .{ .immediate = passed_size }));
+ try isel.emit(.tbz(
+ offs_ra.w(),
+ 31,
+ @intCast((isel.instructions.items.len + 1 - stack_label) << 2),
+ ));
+ try isel.emit(.ldr(offs_ra.w(), .{ .unsigned_offset = .{
+ .base = va_list_ptr_mat.ra.x(),
+ .offset = if (passed_is_vector) 28 else 24,
+ } }));
+ },
+ }
+ try va_list_ptr_mat.finish(isel);
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .c_va_copy => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |va_list_vi| {
+ defer va_list_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const va_list_ptr_vi = try isel.use(ty_op.operand);
+ const va_list_ptr_mat = try va_list_ptr_vi.matReg(isel);
+ _ = try va_list_vi.value.load(isel, ty_op.ty.toType(), va_list_ptr_mat.ra, .{});
+ try va_list_ptr_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .c_va_end => if (air.next()) |next_air_tag| continue :air_tag next_air_tag,
+ .c_va_start => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |va_list_vi| {
+ defer va_list_vi.value.deref(isel);
+ const ty = air.data(air.inst_index).ty;
+ switch (isel.va_list) {
+ .other => |va_list| if (try va_list_vi.value.defReg(isel)) |va_list_ra| try isel.emit(.add(
+ va_list_ra.x(),
+ va_list.base.x(),
+ .{ .immediate = @intCast(va_list.offset) },
+ )),
+ .sysv => |va_list| {
+ var vr_offs_it = va_list_vi.value.field(ty, 28, 4);
+ const vr_offs_vi = try vr_offs_it.only(isel);
+ if (try vr_offs_vi.?.defReg(isel)) |vr_offs_ra| try isel.movImmediate(
+ vr_offs_ra.w(),
+ @as(u32, @bitCast(va_list.__vr_offs)),
+ );
+ var gr_offs_it = va_list_vi.value.field(ty, 24, 4);
+ const gr_offs_vi = try gr_offs_it.only(isel);
+ if (try gr_offs_vi.?.defReg(isel)) |gr_offs_ra| try isel.movImmediate(
+ gr_offs_ra.w(),
+ @as(u32, @bitCast(va_list.__gr_offs)),
+ );
+ var vr_top_it = va_list_vi.value.field(ty, 16, 8);
+ const vr_top_vi = try vr_top_it.only(isel);
+ if (try vr_top_vi.?.defReg(isel)) |vr_top_ra| try isel.emit(.add(
+ vr_top_ra.x(),
+ va_list.__vr_top.base.x(),
+ .{ .immediate = @intCast(va_list.__vr_top.offset) },
+ ));
+ var gr_top_it = va_list_vi.value.field(ty, 8, 8);
+ const gr_top_vi = try gr_top_it.only(isel);
+ if (try gr_top_vi.?.defReg(isel)) |gr_top_ra| try isel.emit(.add(
+ gr_top_ra.x(),
+ va_list.__gr_top.base.x(),
+ .{ .immediate = @intCast(va_list.__gr_top.offset) },
+ ));
+ var stack_it = va_list_vi.value.field(ty, 0, 8);
+ const stack_vi = try stack_it.only(isel);
+ if (try stack_vi.?.defReg(isel)) |stack_ra| try isel.emit(.add(
+ stack_ra.x(),
+ va_list.__stack.base.x(),
+ .{ .immediate = @intCast(va_list.__stack.offset) },
+ ));
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .work_item_id, .work_group_size, .work_group_id => unreachable,
+ }
+ assert(air.body_index == 0);
+}
+
+/// Debug-only consistency check on the instruction-selection state.
+/// No-op unless `std.debug.runtime_safety` is enabled.
+///
+/// Asserts that:
+/// * only the single main block remains (`isel.blocks` holds exactly
+///   `Select.Block.main`) and no loops are still active;
+/// * `dom_start`/`dom_len` are both zero — presumably the dominator
+///   scratch state has been fully reset (TODO confirm semantics);
+/// * every entry in `live_registers` is `.allocating` or `.free` — an
+///   unnamed enum value (the `_` prong) indicates corrupt tracking state,
+///   so all values are dumped before tripping `unreachable`;
+/// * when `check_values` is set, no value retains outstanding references
+///   (`refs != 0`); violations dump only the still-referenced values.
+pub fn verify(isel: *Select, check_values: bool) void {
+ if (!std.debug.runtime_safety) return;
+ assert(isel.blocks.count() == 1 and isel.blocks.keys()[0] == Select.Block.main);
+ assert(isel.active_loops.items.len == 0);
+ assert(isel.dom_start == 0 and isel.dom_len == 0);
+ var live_reg_it = isel.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
+ _ => {
+ // Unnamed tag value: register state is corrupt; dump everything for diagnosis.
+ isel.dumpValues(.all);
+ unreachable;
+ },
+ .allocating, .free => {},
+ };
+ if (check_values) for (isel.values.items) |value| if (value.refs != 0) {
+ isel.dumpValues(.only_referenced);
+ unreachable;
+ };
+}
+
+/// Stack Frame Layout
+/// +-+-----------------------------------+
+/// |R| allocated stack |
+/// +-+-----------------------------------+
+/// |S| caller frame record | +---------------+
+/// +-+-----------------------------------+ <-| entry/exit FP |
+/// |R| caller frame | +---------------+
+/// +-+-----------------------------------+
+/// |R| variable incoming stack arguments | +---------------+
+/// +-+-----------------------------------+ <-| __stack |
+/// |S| named incoming stack arguments | +---------------+
+/// +-+-----------------------------------+ <-| entry/exit SP |
+/// |S| incoming gr arguments | | __gr_top |
+/// +-+-----------------------------------+ +---------------+
+/// |S| alignment gap |
+/// +-+-----------------------------------+
+/// |S| frame record | +----------+
+/// +-+-----------------------------------+ <-| FP |
+/// |S| incoming vr arguments | | __vr_top |
+/// +-+-----------------------------------+ +----------+
+/// |L| alignment gap |
+/// +-+-----------------------------------+
+/// |L| callee saved vr area |
+/// +-+-----------------------------------+
+/// |L| callee saved gr area | +----------------------+
+/// +-+-----------------------------------+ <-| prologue/epilogue SP |
+/// |R| realignment gap | +----------------------+
+/// +-+-----------------------------------+
+/// |L| locals |
+/// +-+-----------------------------------+
+/// |S| outgoing stack arguments | +----+
+/// +-+-----------------------------------+ <-| SP |
+/// |R| unallocated stack | +----+
+/// +-+-----------------------------------+
+/// [S] Size computed by `analyze`, can be used by the body.
+/// [L] Size computed by `layout`, can be used by the prologue/epilogue.
+/// [R] Size unknown until runtime, can vary from one call to the next.
+///
+/// Constraints that led to this layout:
+/// * FP to __stack/__gr_top/__vr_top must only pass through [S]
+/// * SP to outgoing stack arguments/locals must only pass through [S]
+/// * entry/exit SP to prologue/epilogue SP must only pass through [S/L]
+/// * all save areas must be at a positive offset from prologue/epilogue SP
+/// * the entry/exit SP to prologue/epilogue SP distance must
+/// - be a multiple of 16 due to hardware restrictions on the value of SP
+/// - conform to the limit from the first matching condition in the
+/// following list due to instruction encoding limitations
+/// 1. callee saved gr count >= 2: multiple of 8 of at most 504 bytes
+/// 2. callee saved vr count >= 2: multiple of 8 of at most 504 bytes
+/// 3. callee saved gr count >= 1: at most 255 bytes
+/// 4. callee saved vr count >= 1: at most 255 bytes
+/// 5. variable incoming vr argument count >= 2: multiple of 16 of at most 1008 bytes
+/// 6. variable incoming vr argument count >= 1: at most 255 bytes
+/// 7. have frame record: multiple of 8 of at most 504 bytes
+pub fn layout(
+ isel: *Select,
+ incoming: CallAbiIterator,
+ is_sysv_var_args: bool,
+ saved_gra_len: u7,
+ saved_vra_len: u7,
+ mod: *const Package.Module,
+) !usize {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(isel.nav_index);
+ wip_mir_log.debug("{f}<body>:\n", .{nav.fqn.fmt(ip)});
+
+ const stack_size: u24 = @intCast(InternPool.Alignment.@"16".forward(isel.stack_size));
+
+ var saves_buf: [10 + 8 + 8 + 2 + 8]struct {
+ class: enum { integer, vector },
+ needs_restore: bool,
+ register: Register,
+ offset: u10,
+ size: u5,
+ } = undefined;
+ const saves, const saves_size, const frame_record_offset = saves: {
+ var saves_len: usize = 0;
+ var saves_size: u10 = 0;
+ var save_ra: Register.Alias = undefined;
+
+ // callee saved gr area
+ save_ra = .r19;
+ while (save_ra != .r29) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
+ if (!isel.saved_registers.contains(save_ra)) continue;
+ saves_size = std.mem.alignForward(u10, saves_size, 8);
+ saves_buf[saves_len] = .{
+ .class = .integer,
+ .needs_restore = true,
+ .register = save_ra.x(),
+ .offset = saves_size,
+ .size = 8,
+ };
+ saves_len += 1;
+ saves_size += 8;
+ }
+ var deferred_gr = if (saves_size == 8 or (saves_size % 16 != 0 and saved_gra_len % 2 != 0)) gr: {
+ saves_len -= 1;
+ saves_size -= 8;
+ break :gr saves_buf[saves_len].register;
+ } else null;
+ defer assert(deferred_gr == null);
+
+ // callee saved vr area
+ save_ra = .v8;
+ while (save_ra != .v16) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
+ if (!isel.saved_registers.contains(save_ra)) continue;
+ saves_size = std.mem.alignForward(u10, saves_size, 8);
+ saves_buf[saves_len] = .{
+ .class = .vector,
+ .needs_restore = true,
+ .register = save_ra.d(),
+ .offset = saves_size,
+ .size = 8,
+ };
+ saves_len += 1;
+ saves_size += 8;
+ }
+ if (deferred_gr != null and saved_gra_len % 2 == 0) {
+ saves_size = std.mem.alignForward(u10, saves_size, 8);
+ saves_buf[saves_len] = .{
+ .class = .integer,
+ .needs_restore = true,
+ .register = deferred_gr.?,
+ .offset = saves_size,
+ .size = 8,
+ };
+ saves_len += 1;
+ saves_size += 8;
+ deferred_gr = null;
+ }
+ if (saves_size % 16 != 0 and saved_vra_len % 2 != 0) {
+ const prev_save = &saves_buf[saves_len - 1];
+ switch (prev_save.class) {
+ .integer => {},
+ .vector => {
+ prev_save.register = prev_save.register.alias.q();
+ prev_save.size = 16;
+ saves_size += 8;
+ },
+ }
+ }
+
+ // incoming vr arguments
+ save_ra = if (mod.strip) incoming.nsrn else CallAbiIterator.nsrn_start;
+ while (save_ra != if (is_sysv_var_args) CallAbiIterator.nsrn_end else incoming.nsrn) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
+ saves_size = std.mem.alignForward(u10, saves_size, 16);
+ saves_buf[saves_len] = .{
+ .class = .vector,
+ .needs_restore = false,
+ .register = save_ra.q(),
+ .offset = saves_size,
+ .size = 16,
+ };
+ saves_len += 1;
+ saves_size += 16;
+ }
+
+ // frame record
+ saves_size = std.mem.alignForward(u10, saves_size, 16);
+ const frame_record_offset = saves_size;
+ saves_buf[saves_len] = .{
+ .class = .integer,
+ .needs_restore = true,
+ .register = .fp,
+ .offset = saves_size,
+ .size = 8,
+ };
+ saves_len += 1;
+ saves_size += 8;
+
+ saves_size = std.mem.alignForward(u10, saves_size, 8);
+ saves_buf[saves_len] = .{
+ .class = .integer,
+ .needs_restore = true,
+ .register = .lr,
+ .offset = saves_size,
+ .size = 8,
+ };
+ saves_len += 1;
+ saves_size += 8;
+
+ // incoming gr arguments
+ if (deferred_gr) |gr| {
+ saves_size = std.mem.alignForward(u10, saves_size, 8);
+ saves_buf[saves_len] = .{
+ .class = .integer,
+ .needs_restore = true,
+ .register = gr,
+ .offset = saves_size,
+ .size = 8,
+ };
+ saves_len += 1;
+ saves_size += 8;
+ deferred_gr = null;
+ } else switch (@as(u1, @truncate(saved_gra_len))) {
+ 0 => {},
+ 1 => saves_size += 8,
+ }
+ save_ra = if (mod.strip) incoming.ngrn else CallAbiIterator.ngrn_start;
+ while (save_ra != if (is_sysv_var_args) CallAbiIterator.ngrn_end else incoming.ngrn) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
+ saves_size = std.mem.alignForward(u10, saves_size, 8);
+ saves_buf[saves_len] = .{
+ .class = .integer,
+ .needs_restore = false,
+ .register = save_ra.x(),
+ .offset = saves_size,
+ .size = 8,
+ };
+ saves_len += 1;
+ saves_size += 8;
+ }
+
+ assert(InternPool.Alignment.@"16".check(saves_size));
+ break :saves .{ saves_buf[0..saves_len], saves_size, frame_record_offset };
+ };
+
+ {
+ wip_mir_log.debug("{f}<prologue>:", .{nav.fqn.fmt(ip)});
+ var save_index: usize = 0;
+ while (save_index < saves.len) if (save_index + 2 <= saves.len and
+ saves[save_index + 0].class == saves[save_index + 1].class and
+ saves[save_index + 0].size == saves[save_index + 1].size and
+ saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
+ {
+ try isel.emit(.stp(
+ saves[save_index + 0].register,
+ saves[save_index + 1].register,
+ switch (saves[save_index + 0].offset) {
+ 0 => .{ .pre_index = .{
+ .base = .sp,
+ .index = @intCast(-@as(i11, saves_size)),
+ } },
+ else => |offset| .{ .signed_offset = .{
+ .base = .sp,
+ .offset = @intCast(offset),
+ } },
+ },
+ ));
+ save_index += 2;
+ } else {
+ try isel.emit(.str(
+ saves[save_index].register,
+ switch (saves[save_index].offset) {
+ 0 => .{ .pre_index = .{
+ .base = .sp,
+ .index = @intCast(-@as(i11, saves_size)),
+ } },
+ else => |offset| .{ .unsigned_offset = .{
+ .base = .sp,
+ .offset = @intCast(offset),
+ } },
+ },
+ ));
+ save_index += 1;
+ };
+
+ try isel.emit(.add(.fp, .sp, .{ .immediate = frame_record_offset }));
+ const scratch_reg: Register = if (isel.stack_align == .@"16")
+ .sp
+ else if (stack_size == 0 and frame_record_offset == 0)
+ .fp
+ else
+ .ip0;
+ const stack_size_lo: u12 = @truncate(stack_size >> 0);
+ const stack_size_hi: u12 = @truncate(stack_size >> 12);
+ if (mod.stack_check) {
+ if (stack_size_hi > 2) {
+ try isel.movImmediate(.ip1, stack_size_hi);
+ const loop_label = isel.instructions.items.len;
+ try isel.emit(.sub(.sp, .sp, .{
+ .shifted_immediate = .{ .immediate = 1, .lsl = .@"12" },
+ }));
+ try isel.emit(.sub(.ip1, .ip1, .{ .immediate = 1 }));
+ try isel.emit(.ldr(.xzr, .{ .base = .sp }));
+ try isel.emit(.cbnz(.ip1, -@as(i21, @intCast(
+ (isel.instructions.items.len - loop_label) << 2,
+ ))));
+ } else for (0..stack_size_hi) |_| {
+ try isel.emit(.sub(.sp, .sp, .{
+ .shifted_immediate = .{ .immediate = 1, .lsl = .@"12" },
+ }));
+ try isel.emit(.ldr(.xzr, .{ .base = .sp }));
+ }
+ if (stack_size_lo > 0) try isel.emit(.sub(
+ scratch_reg,
+ .sp,
+ .{ .immediate = stack_size_lo },
+ )) else if (scratch_reg.alias == Register.Alias.ip0)
+ try isel.emit(.add(scratch_reg, .sp, .{ .immediate = 0 }));
+ } else {
+ if (stack_size_hi > 0) try isel.emit(.sub(scratch_reg, .sp, .{
+ .shifted_immediate = .{ .immediate = stack_size_hi, .lsl = .@"12" },
+ }));
+ if (stack_size_lo > 0) try isel.emit(.sub(
+ scratch_reg,
+ if (stack_size_hi > 0) scratch_reg else .sp,
+ .{ .immediate = stack_size_lo },
+ )) else if (scratch_reg.alias == Register.Alias.ip0 and stack_size_hi == 0)
+ try isel.emit(.add(scratch_reg, .sp, .{ .immediate = 0 }));
+ }
+ if (isel.stack_align != .@"16") try isel.emit(.@"and"(.sp, scratch_reg, .{ .immediate = .{
+ .N = .doubleword,
+ .immr = -%isel.stack_align.toLog2Units(),
+ .imms = ~isel.stack_align.toLog2Units(),
+ } }));
+ wip_mir_log.debug("", .{});
+ }
+
+ const epilogue = isel.instructions.items.len;
+ if (isel.returns) {
+ try isel.emit(.ret(.lr));
+ var save_index: usize = 0;
+ var first_offset: ?u10 = null;
+ while (save_index < saves.len) {
+ if (save_index + 2 <= saves.len and saves[save_index + 1].needs_restore and
+ saves[save_index + 0].class == saves[save_index + 1].class and
+ saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
+ {
+ try isel.emit(.ldp(
+ saves[save_index + 0].register,
+ saves[save_index + 1].register,
+ if (first_offset) |offset| .{ .signed_offset = .{
+ .base = .sp,
+ .offset = @intCast(saves[save_index + 0].offset - offset),
+ } } else form: {
+ first_offset = @intCast(saves[save_index + 0].offset);
+ break :form .{ .post_index = .{
+ .base = .sp,
+ .index = @intCast(saves_size - first_offset.?),
+ } };
+ },
+ ));
+ save_index += 2;
+ } else if (saves[save_index].needs_restore) {
+ try isel.emit(.ldr(
+ saves[save_index].register,
+ if (first_offset) |offset| .{ .unsigned_offset = .{
+ .base = .sp,
+ .offset = saves[save_index + 0].offset - offset,
+ } } else form: {
+ const offset = saves[save_index + 0].offset;
+ first_offset = offset;
+ break :form .{ .post_index = .{
+ .base = .sp,
+ .index = @intCast(saves_size - offset),
+ } };
+ },
+ ));
+ save_index += 1;
+ } else save_index += 1;
+ }
+ const offset = stack_size + first_offset.?;
+ const offset_lo: u12 = @truncate(offset >> 0);
+ const offset_hi: u12 = @truncate(offset >> 12);
+ if (isel.stack_align != .@"16" or (offset_lo > 0 and offset_hi > 0)) {
+ const fp_offset = @as(i11, first_offset.?) - frame_record_offset;
+ try isel.emit(if (fp_offset >= 0)
+ .add(.sp, .fp, .{ .immediate = @intCast(fp_offset) })
+ else
+ .sub(.sp, .fp, .{ .immediate = @intCast(-fp_offset) }));
+ } else {
+ if (offset_hi > 0) try isel.emit(.add(.sp, .sp, .{
+ .shifted_immediate = .{ .immediate = offset_hi, .lsl = .@"12" },
+ }));
+ if (offset_lo > 0) try isel.emit(.add(.sp, .sp, .{
+ .immediate = offset_lo,
+ }));
+ }
+ wip_mir_log.debug("{f}<epilogue>:\n", .{nav.fqn.fmt(ip)});
+ }
+ return epilogue;
+}
+
+/// Returns a formatter that renders the dominator set of `inst` as
+/// "%N -> { %a, %b }". The set is a bit vector over
+/// `isel.blocks.keys()[0..len]`, stored in `isel.dom.items` starting at
+/// element `start`, packed `@bitSizeOf(DomInt)` blocks per element.
+fn fmtDom(isel: *Select, inst: Air.Inst.Index, start: u32, len: u32) struct {
+    isel: *Select,
+    inst: Air.Inst.Index,
+    start: u32,
+    len: u32,
+    pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+        try writer.print("%{d} -> {{", .{@intFromEnum(data.inst)});
+        var first = true;
+        for (data.isel.blocks.keys()[0..data.len], 0..) |block_inst_index, dom_index| {
+            // Skip blocks whose bit is clear; the shift amount is implicitly
+            // `dom_index % @bitSizeOf(DomInt)` via the `@truncate`.
+            if (@as(u1, @truncate(data.isel.dom.items[
+                data.start + dom_index / @bitSizeOf(DomInt)
+            ] >> @truncate(dom_index))) == 0) continue;
+            if (first) {
+                first = false;
+            } else {
+                try writer.writeByte(',');
+            }
+            switch (block_inst_index) {
+                Block.main => try writer.writeAll(" %main"),
+                else => try writer.print(" %{d}", .{@intFromEnum(block_inst_index)}),
+            }
+        }
+        if (!first) try writer.writeByte(' ');
+        try writer.writeByte('}');
+    }
+} {
+    return .{ .isel = isel, .inst = inst, .start = start, .len = len };
+}
+
+/// Returns a formatter that renders the set of instructions live across the
+/// loop `loop_inst` as "%N <- { %a, %b }".
+fn fmtLoopLive(isel: *Select, loop_inst: Air.Inst.Index) struct {
+    isel: *Select,
+    inst: Air.Inst.Index,
+    pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+        const loops = data.isel.loops.values();
+        const loop_index = data.isel.loops.getIndex(data.inst).?;
+        // The live range is [loops[i].live, loops[i + 1].live); this assumes a
+        // following entry (or sentinel) always exists — NOTE(review): confirm
+        // a sentinel entry is appended for the last loop.
+        const live_insts =
+            data.isel.loop_live.list.items[loops[loop_index].live..loops[loop_index + 1].live];
+
+        try writer.print("%{d} <- {{", .{@intFromEnum(data.inst)});
+        var first = true;
+        for (live_insts) |live_inst| {
+            if (first) {
+                first = false;
+            } else {
+                try writer.writeByte(',');
+            }
+            try writer.print(" %{d}", .{@intFromEnum(live_inst)});
+        }
+        if (!first) try writer.writeByte(' ');
+        try writer.writeByte('}');
+    }
+} {
+    return .{ .isel = isel, .inst = loop_inst };
+}
+
+/// Formatting adapter for a `ZigType` using this selection's `PerThread`.
+fn fmtType(isel: *Select, ty: ZigType) ZigType.Formatter {
+    const pt = isel.pt;
+    return ty.fmt(pt);
+}
+
+/// Formatting adapter for a constant value using this selection's `PerThread`.
+fn fmtConstant(isel: *Select, constant: Constant) @typeInfo(@TypeOf(Constant.fmtValue)).@"fn".return_type.? {
+    const pt = isel.pt;
+    return constant.fmtValue(pt);
+}
+
+/// Lowers an AIR `block`: registers a branch-target entry for the block
+/// (unless its result type is `noreturn`, which cannot be branched out of),
+/// lowers the body, then pops the entry and releases any result value.
+fn block(
+    isel: *Select,
+    air_inst_index: Air.Inst.Index,
+    res_ty: ZigType,
+    air_body: []const Air.Inst.Index,
+) !void {
+    const is_noreturn = res_ty.toIntern() == .noreturn_type;
+    if (!is_noreturn) isel.blocks.putAssumeCapacityNoClobber(air_inst_index, .{
+        .live_registers = isel.live_registers,
+        .target_label = @intCast(isel.instructions.items.len),
+    });
+    try isel.body(air_body);
+    if (is_noreturn) return;
+    const block_entry = isel.blocks.pop().?;
+    assert(block_entry.key == air_inst_index);
+    const result_vi = isel.live_values.fetchRemove(air_inst_index) orelse return;
+    result_vi.value.deref(isel);
+}
+
+/// Appends one encoded AArch64 instruction to the output buffer, logging it
+/// to the work-in-progress MIR log first.
+fn emit(isel: *Select, instruction: codegen.aarch64.encoding.Instruction) !void {
+    const gpa = isel.pt.zcu.gpa;
+    wip_mir_log.debug(" | {f}", .{instruction});
+    try isel.instructions.append(gpa, instruction);
+}
+
+/// Emits a call to the compiler-provided panic handler for `panic_id`,
+/// recording a nav relocation so the `bl` target can be patched later.
+fn emitPanic(isel: *Select, panic_id: Zcu.SimplePanicId) !void {
+    const zcu = isel.pt.zcu;
+    try isel.nav_relocs.append(zcu.gpa, .{
+        // The builtin panic decl is either an extern or a function; both carry
+        // an owner nav to relocate against.
+        .nav = switch (zcu.intern_pool.indexToKey(zcu.builtin_decl_values.get(panic_id.toBuiltin()))) {
+            else => unreachable,
+            inline .@"extern", .func => |func| func.owner_nav,
+        },
+        // Label the reloc with the index of the `bl 0` emitted just below.
+        .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+    });
+    try isel.emit(.bl(0));
+}
+
+/// Appends `bytes` to the literal pool as 32-bit words.
+/// `bytes.len` must be a multiple of 4 (enforced by the slice `@ptrCast`).
+fn emitLiteral(isel: *Select, bytes: []const u8) !void {
+    const words: []align(1) const u32 = @ptrCast(bytes);
+    const literals = try isel.literals.addManyAsSlice(isel.pt.zcu.gpa, words.len);
+    switch (isel.target.cpu.arch.endian()) {
+        .little => @memcpy(literals, words),
+        // NOTE(review): for big-endian targets each word is byte-swapped AND
+        // the word order within this literal is reversed — confirm the word
+        // reversal is intentional for multi-word literals.
+        .big => for (words, 0..) |word, word_index| {
+            literals[literals.len - 1 - word_index] = @byteSwap(word);
+        },
+    }
+}
+
+/// Records a codegen failure for the nav currently being compiled.
+/// Marked cold: failures are the rare path.
+fn fail(isel: *Select, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+    @branchHint(.cold);
+    const zcu = isel.pt.zcu;
+    return zcu.codegenFail(isel.nav_index, format, args);
+}
+
+/// dst = src
+/// Materializes `src_imm` into `dst_reg` with a short sequence:
+///   * zero: a single register-form `orr dst, zr, zr`;
+///   * a repeating rotated run of ones: a single `orr` with a bitmask
+///     ("logical") immediate;
+///   * otherwise: one `movz`/`movn` plus `movk`s for each 16-bit chunk that
+///     differs from the chosen fill pattern.
+/// Note: this backend appends instructions in reverse execution order — the
+/// base `movz`/`movn` is emitted last so that it executes first.
+fn movImmediate(isel: *Select, dst_reg: Register, src_imm: u64) !void {
+    const sf = dst_reg.format.integer;
+    if (src_imm == 0) {
+        const zr: Register = switch (sf) {
+            .word => .wzr,
+            .doubleword => .xzr,
+        };
+        return isel.emit(.orr(dst_reg, zr, .{ .register = zr }));
+    }
+
+    const Part = u16;
+    const min_part: Part = std.math.minInt(Part);
+    const max_part: Part = std.math.maxInt(Part);
+
+    // Peer resolution widens both arms to u64, so a 32-bit immediate occupies
+    // the low two 16-bit parts.
+    const parts: [4]Part = @bitCast(switch (sf) {
+        .word => @as(u32, @intCast(src_imm)),
+        .doubleword => @as(u64, @intCast(src_imm)),
+    });
+    const width: u7 = switch (sf) {
+        .word => 32,
+        .doubleword => 64,
+    };
+    const parts_len: u3 = @intCast(@divExact(width, @bitSizeOf(Part)));
+    // Count all-zero vs all-one chunks to decide whether a `movz` (zero fill)
+    // or `movn` (ones fill) base saves the most `movk`s.
+    var equal_min_count: u3 = 0;
+    var equal_max_count: u3 = 0;
+    for (parts[0..parts_len]) |part| {
+        equal_min_count += @intFromBool(part == min_part);
+        equal_max_count += @intFromBool(part == max_part);
+    }
+
+    const equal_fill_count, const fill_part: Part = if (equal_min_count >= equal_max_count)
+        .{ equal_min_count, min_part }
+    else
+        .{ equal_max_count, max_part };
+    // At least one instruction is always emitted, even when every chunk
+    // matches the fill pattern.
+    var remaining_parts = @max(parts_len - equal_fill_count, 1);
+
+    if (remaining_parts > 1) {
+        // More than one mov would be needed; try a single bitmask-immediate
+        // `orr` instead: search for a power-of-two element width whose
+        // repetition reproduces the immediate and whose element is a
+        // (possibly inverted) contiguous run of ones.
+        var elem_width: u8 = 2;
+        while (elem_width <= width) : (elem_width <<= 1) {
+            const emask = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - elem_width);
+            const rmask = @divExact(@as(u64, switch (sf) {
+                .word => std.math.maxInt(u32),
+                .doubleword => std.math.maxInt(u64),
+            }), emask);
+            const elem = src_imm & emask;
+            if (src_imm != elem * rmask) continue;
+            // imask is all-ones when bit 0 of the element is set, selecting
+            // the inverted-run case.
+            const imask: u64 = @bitCast(@as(i64, @bitCast(elem << 63)) >> 63);
+            const lsb0 = elem ^ (imask & emask);
+            const lsb1 = (lsb0 - 1) | lsb0;
+            if ((lsb1 +% 1) & lsb1 == 0) {
+                const lo: u6 = @intCast(@ctz(lsb0));
+                const hi: u6 = @intCast(@clz(lsb0) - (64 - elem_width));
+                const mid: u6 = @intCast(elem_width - lo - hi);
+                const smask: u6 = @truncate(imask);
+                const mid_masked = mid & ~smask;
+                // NOTE(review): the N/immr/imms arithmetic follows the A64
+                // "logical (immediate)" encoding — verify against the Arm ARM
+                // when touching this.
+                return isel.emit(.orr(
+                    dst_reg,
+                    switch (sf) {
+                        .word => .wzr,
+                        .doubleword => .xzr,
+                    },
+                    .{ .immediate = .{
+                        .N = @enumFromInt(elem_width >> 6),
+                        .immr = hi + mid_masked,
+                        .imms = ((((lo + hi) & smask) | mid_masked) - 1) | -%@as(u6, @truncate(elem_width)) << 1,
+                    } },
+                ));
+            }
+        }
+    }
+
+    // Emit chunks from most- to least-significant; chunks equal to the fill
+    // pattern are skipped while later chunks remain to establish them.
+    var part_index = parts_len;
+    while (part_index > 0) {
+        part_index -= 1;
+        if (part_index >= remaining_parts and parts[part_index] == fill_part) continue;
+        remaining_parts -= 1;
+        try isel.emit(if (remaining_parts > 0) .movk(
+            dst_reg,
+            parts[part_index],
+            .{ .lsl = @enumFromInt(part_index) },
+        ) else switch (fill_part) {
+            else => unreachable,
+            // Zero fill: movz establishes the register with zeros elsewhere.
+            min_part => .movz(
+                dst_reg,
+                parts[part_index],
+                .{ .lsl = @enumFromInt(part_index) },
+            ),
+            // Ones fill: movn establishes the register with ones elsewhere.
+            max_part => .movn(
+                dst_reg,
+                ~parts[part_index],
+                .{ .lsl = @enumFromInt(part_index) },
+            ),
+        });
+    }
+    assert(remaining_parts == 0);
+}
+
+/// elem_ptr = base +- elem_size * index
+/// elem_ptr, base, and index may alias
+/// Strength-reduces the multiplication by `elem_size`:
+///   * one set bit: a single shifted-register add/sub (or, for a zero base,
+///     just a shift or register move);
+///   * two set bits (2^lo + 2^hi): index + (index << (hi - lo)), then a final
+///     add/sub shifted by lo;
+///   * a contiguous run of ones (2^(hi+1) - 2^lo): index - (index << k) with
+///     signs arranged so the final shifted op applies the correct direction;
+///   * anything else: `madd`/`msub` with `elem_size` materialized into
+///     `elem_ptr_ra`.
+/// Note: instructions are emitted in reverse execution order — each consumer
+/// is emitted before the producers of its operands (e.g. the `madd` is
+/// emitted before the `movImmediate` that loads `elem_size`).
+fn elemPtr(
+    isel: *Select,
+    elem_ptr_ra: Register.Alias,
+    base_ra: Register.Alias,
+    op: codegen.aarch64.encoding.Instruction.AddSubtractOp,
+    elem_size: u64,
+    index_vi: Value.Index,
+) !void {
+    const index_mat = try index_vi.matReg(isel);
+    switch (@popCount(elem_size)) {
+        0 => unreachable,
+        // Power-of-two element size: one shifted-register op.
+        1 => try isel.emit(switch (op) {
+            .add => switch (base_ra) {
+                else => .add(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
+                    .register = index_mat.ra.x(),
+                    .shift = .{ .lsl = @intCast(@ctz(elem_size)) },
+                } }),
+                // Zero base: a plain move (shift 0) or a left shift via ubfm.
+                .zr => switch (@ctz(elem_size)) {
+                    0 => .orr(elem_ptr_ra.x(), .xzr, .{ .register = index_mat.ra.x() }),
+                    else => |shift| .ubfm(elem_ptr_ra.x(), index_mat.ra.x(), .{
+                        .N = .doubleword,
+                        .immr = @intCast(64 - shift),
+                        .imms = @intCast(63 - shift),
+                    }),
+                },
+            },
+            .sub => .sub(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
+                .register = index_mat.ra.x(),
+                .shift = .{ .lsl = @intCast(@ctz(elem_size)) },
+            } }),
+        }),
+        // Two set bits: elem_size = (1 << shift) * (1 + (1 << (hi - lo))).
+        2 => {
+            const shift: u6 = @intCast(@ctz(elem_size));
+            const temp_ra = temp_ra: switch (op) {
+                .add => switch (base_ra) {
+                    else => {
+                        const temp_ra = try isel.allocIntReg();
+                        errdefer isel.freeReg(temp_ra);
+                        try isel.emit(.add(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
+                            .register = temp_ra.x(),
+                            .shift = .{ .lsl = shift },
+                        } }));
+                        break :temp_ra temp_ra;
+                    },
+                    // Zero base: reuse elem_ptr_ra as the temporary and shift
+                    // it in place (ubfm executes after the add below).
+                    .zr => {
+                        if (shift > 0) try isel.emit(.ubfm(elem_ptr_ra.x(), elem_ptr_ra.x(), .{
+                            .N = .doubleword,
+                            .immr = -%shift,
+                            .imms = ~shift,
+                        }));
+                        break :temp_ra elem_ptr_ra;
+                    },
+                },
+                .sub => {
+                    const temp_ra = try isel.allocIntReg();
+                    errdefer isel.freeReg(temp_ra);
+                    try isel.emit(.sub(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
+                        .register = temp_ra.x(),
+                        .shift = .{ .lsl = shift },
+                    } }));
+                    break :temp_ra temp_ra;
+                },
+            };
+            defer if (temp_ra != elem_ptr_ra) isel.freeReg(temp_ra);
+            // temp = index + (index << (hi - lo)); executes first.
+            try isel.emit(.add(temp_ra.x(), index_mat.ra.x(), .{ .shifted_register = .{
+                .register = index_mat.ra.x(),
+                .shift = .{ .lsl = @intCast(63 - @clz(elem_size) - shift) },
+            } }));
+        },
+        else => {
+            const elem_size_lsb1 = (elem_size - 1) | elem_size;
+            // Contiguous run of ones when filling the low bits yields a
+            // power of two minus one.
+            if ((elem_size_lsb1 +% 1) & elem_size_lsb1 == 0) {
+                const shift: u6 = @intCast(@ctz(elem_size));
+                const temp_ra = temp_ra: switch (op) {
+                    // temp holds -(index * (2^k - 1)), so add becomes sub and
+                    // vice versa in the final op.
+                    .add => {
+                        const temp_ra = try isel.allocIntReg();
+                        errdefer isel.freeReg(temp_ra);
+                        try isel.emit(.sub(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
+                            .register = temp_ra.x(),
+                            .shift = .{ .lsl = shift },
+                        } }));
+                        break :temp_ra temp_ra;
+                    },
+                    .sub => switch (base_ra) {
+                        else => {
+                            const temp_ra = try isel.allocIntReg();
+                            errdefer isel.freeReg(temp_ra);
+                            try isel.emit(.add(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
+                                .register = temp_ra.x(),
+                                .shift = .{ .lsl = shift },
+                            } }));
+                            break :temp_ra temp_ra;
+                        },
+                        .zr => {
+                            if (shift > 0) try isel.emit(.ubfm(elem_ptr_ra.x(), elem_ptr_ra.x(), .{
+                                .N = .doubleword,
+                                .immr = -%shift,
+                                .imms = ~shift,
+                            }));
+                            break :temp_ra elem_ptr_ra;
+                        },
+                    },
+                };
+                defer if (temp_ra != elem_ptr_ra) isel.freeReg(temp_ra);
+                // temp = index - (index << k) = -(index * (2^k - 1)); executes first.
+                try isel.emit(.sub(temp_ra.x(), index_mat.ra.x(), .{ .shifted_register = .{
+                    .register = index_mat.ra.x(),
+                    .shift = .{ .lsl = @intCast(64 - @clz(elem_size) - shift) },
+                } }));
+            } else {
+                // General case: elem_ptr = base +- index * elem_size via
+                // madd/msub; the movImmediate below executes first, loading
+                // elem_size into elem_ptr_ra as the multiplicand.
+                try isel.emit(switch (op) {
+                    .add => .madd(elem_ptr_ra.x(), index_mat.ra.x(), elem_ptr_ra.x(), base_ra.x()),
+                    .sub => .msub(elem_ptr_ra.x(), index_mat.ra.x(), elem_ptr_ra.x(), base_ra.x()),
+                });
+                try isel.movImmediate(elem_ptr_ra.x(), elem_size);
+            }
+        },
+    }
+    try index_mat.finish(isel);
+}
+
+/// Emits a count-leading-zeros of the `src_int_info.bits`-bit limb in
+/// `src_ra`, writing the result to `res_ra`.
+/// Instructions are emitted in reverse execution order: the bias `sub`
+/// (compensating for counting in a full 32/64-bit register) is emitted first
+/// but executes last.
+/// For signed sources the limb is first zero-extended via `ubfm` so the sign
+/// bits above `bits` do not affect the count; unsigned sources are counted
+/// directly — assumes they are already zero-extended in the register (TODO
+/// confirm that invariant at the call sites).
+fn clzLimb(
+    isel: *Select,
+    res_ra: Register.Alias,
+    src_int_info: std.builtin.Type.Int,
+    src_ra: Register.Alias,
+) !void {
+    switch (src_int_info.bits) {
+        else => unreachable,
+        1...31 => |bits| {
+            // Executes last: subtract the 32 - bits excess leading zeros.
+            try isel.emit(.sub(res_ra.w(), res_ra.w(), .{
+                .immediate = @intCast(32 - bits),
+            }));
+            switch (src_int_info.signedness) {
+                .signed => {
+                    try isel.emit(.clz(res_ra.w(), res_ra.w()));
+                    // Executes first: zero-extend the low `bits` bits.
+                    try isel.emit(.ubfm(res_ra.w(), src_ra.w(), .{
+                        .N = .word,
+                        .immr = 0,
+                        .imms = @intCast(bits - 1),
+                    }));
+                },
+                .unsigned => try isel.emit(.clz(res_ra.w(), src_ra.w())),
+            }
+        },
+        32 => try isel.emit(.clz(res_ra.w(), src_ra.w())),
+        33...63 => |bits| {
+            // The w-form sub suffices: the count fits in 32 bits and writing
+            // the w register zero-extends into x.
+            try isel.emit(.sub(res_ra.w(), res_ra.w(), .{
+                .immediate = @intCast(64 - bits),
+            }));
+            switch (src_int_info.signedness) {
+                .signed => {
+                    try isel.emit(.clz(res_ra.x(), res_ra.x()));
+                    try isel.emit(.ubfm(res_ra.x(), src_ra.x(), .{
+                        .N = .doubleword,
+                        .immr = 0,
+                        .imms = @intCast(bits - 1),
+                    }));
+                },
+                .unsigned => try isel.emit(.clz(res_ra.x(), src_ra.x())),
+            }
+        },
+        64 => try isel.emit(.clz(res_ra.x(), src_ra.x())),
+    }
+}
+
+/// Emits a count-trailing-zeros (ctz(x) = clz(rbit(x))) of the
+/// `src_int_info.bits`-bit limb in `src_ra`, writing the result to `res_ra`.
+/// Instructions are emitted in reverse execution order: the `clz` is emitted
+/// first but executes last.
+/// For partial widths, the first-executed `orr` with a bitmask immediate sets
+/// the bits at and above `bits`, so the count saturates at `bits` for a zero
+/// limb (immr/imms per the A64 bitmask-immediate encoding).
+fn ctzLimb(
+    isel: *Select,
+    res_ra: Register.Alias,
+    src_int_info: std.builtin.Type.Int,
+    src_ra: Register.Alias,
+) !void {
+    switch (src_int_info.bits) {
+        else => unreachable,
+        1...31 => |bits| {
+            try isel.emit(.clz(res_ra.w(), res_ra.w()));
+            try isel.emit(.rbit(res_ra.w(), res_ra.w()));
+            // Executes first: set guard bits [bits..31].
+            try isel.emit(.orr(res_ra.w(), src_ra.w(), .{ .immediate = .{
+                .N = .word,
+                .immr = @intCast(32 - bits),
+                .imms = @intCast(32 - bits - 1),
+            } }));
+        },
+        32 => {
+            try isel.emit(.clz(res_ra.w(), res_ra.w()));
+            try isel.emit(.rbit(res_ra.w(), src_ra.w()));
+        },
+        33...63 => |bits| {
+            try isel.emit(.clz(res_ra.x(), res_ra.x()));
+            try isel.emit(.rbit(res_ra.x(), res_ra.x()));
+            // Executes first: set guard bits [bits..63].
+            try isel.emit(.orr(res_ra.x(), src_ra.x(), .{ .immediate = .{
+                .N = .doubleword,
+                .immr = @intCast(64 - bits),
+                .imms = @intCast(64 - bits - 1),
+            } }));
+        },
+        64 => {
+            try isel.emit(.clz(res_ra.x(), res_ra.x()));
+            try isel.emit(.rbit(res_ra.x(), src_ra.x()));
+        },
+    }
+}
+
+/// Loads `size` bytes from `[base_ra + offset]` into `ra` (general-purpose or
+/// vector), extending per `signedness` for sub-word sizes.
+/// Power-of-two sizes up to 16 use a single ldr/ldur when the offset fits the
+/// scaled-unsigned or 9-bit-signed addressing form; sizes 3, 5, 6, and 7 are
+/// assembled from narrower loads into scratch registers combined with shifted
+/// `orr`s. If the offset fits neither form, the address is materialized into
+/// a scratch register and the load retried at offset 0.
+/// Note: instructions are emitted in reverse execution order — combining
+/// `orr`s are emitted before the partial loads producing their operands, and
+/// the address computation is emitted after the recursive load that consumes
+/// `ptr_ra`.
+fn loadReg(
+    isel: *Select,
+    ra: Register.Alias,
+    size: u64,
+    signedness: std.builtin.Signedness,
+    base_ra: Register.Alias,
+    offset: i65,
+) !void {
+    switch (size) {
+        0 => unreachable,
+        1 => {
+            if (std.math.cast(u12, offset)) |unsigned_offset| return isel.emit(if (ra.isVector()) .ldr(
+                ra.b(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ) else switch (signedness) {
+                .signed => .ldrsb(ra.w(), .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } }),
+                .unsigned => .ldrb(ra.w(), .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } }),
+            });
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(if (ra.isVector())
+                .ldur(ra.b(), base_ra.x(), signed_offset)
+            else switch (signedness) {
+                .signed => .ldursb(ra.w(), base_ra.x(), signed_offset),
+                .unsigned => .ldurb(ra.w(), base_ra.x(), signed_offset),
+            });
+        },
+        2 => {
+            // The scaled unsigned form requires 2-byte alignment of the offset.
+            if (std.math.cast(u13, offset)) |unsigned_offset| if (unsigned_offset % 2 == 0)
+                return isel.emit(if (ra.isVector()) .ldr(
+                    ra.h(),
+                    .{ .unsigned_offset = .{
+                        .base = base_ra.x(),
+                        .offset = unsigned_offset,
+                    } },
+                ) else switch (signedness) {
+                    .signed => .ldrsh(
+                        ra.w(),
+                        .{ .unsigned_offset = .{
+                            .base = base_ra.x(),
+                            .offset = unsigned_offset,
+                        } },
+                    ),
+                    .unsigned => .ldrh(
+                        ra.w(),
+                        .{ .unsigned_offset = .{
+                            .base = base_ra.x(),
+                            .offset = unsigned_offset,
+                        } },
+                    ),
+                });
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(if (ra.isVector())
+                .ldur(ra.h(), base_ra.x(), signed_offset)
+            else switch (signedness) {
+                .signed => .ldursh(ra.w(), base_ra.x(), signed_offset),
+                .unsigned => .ldurh(ra.w(), base_ra.x(), signed_offset),
+            });
+        },
+        // 3 bytes: ra = (high byte << 16) | low half. Reverse order: the two
+        // loads execute first, then the combining orr.
+        3 => {
+            const lo16_ra = try isel.allocIntReg();
+            defer isel.freeReg(lo16_ra);
+            try isel.emit(.orr(ra.w(), lo16_ra.w(), .{ .shifted_register = .{
+                .register = ra.w(),
+                .shift = .{ .lsl = 16 },
+            } }));
+            try isel.loadReg(ra, 1, signedness, base_ra, offset + 2);
+            return isel.loadReg(lo16_ra, 2, .unsigned, base_ra, offset);
+        },
+        4 => {
+            if (std.math.cast(u14, offset)) |unsigned_offset| if (unsigned_offset % 4 == 0) return isel.emit(.ldr(
+                if (ra.isVector()) ra.s() else ra.w(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.ldur(
+                if (ra.isVector()) ra.s() else ra.w(),
+                base_ra.x(),
+                signed_offset,
+            ));
+        },
+        // 5 or 6 bytes: ra = (high 1-2 bytes << 32) | low word.
+        5, 6 => {
+            const lo32_ra = try isel.allocIntReg();
+            defer isel.freeReg(lo32_ra);
+            try isel.emit(.orr(ra.x(), lo32_ra.x(), .{ .shifted_register = .{
+                .register = ra.x(),
+                .shift = .{ .lsl = 32 },
+            } }));
+            try isel.loadReg(ra, size - 4, signedness, base_ra, offset + 4);
+            return isel.loadReg(lo32_ra, 4, .unsigned, base_ra, offset);
+        },
+        // 7 bytes: (byte << 48) | (half << 32) | word, combined pairwise.
+        7 => {
+            const lo32_ra = try isel.allocIntReg();
+            defer isel.freeReg(lo32_ra);
+            const lo48_ra = try isel.allocIntReg();
+            defer isel.freeReg(lo48_ra);
+            try isel.emit(.orr(ra.x(), lo48_ra.x(), .{ .shifted_register = .{
+                .register = ra.x(),
+                .shift = .{ .lsl = 32 + 16 },
+            } }));
+            try isel.loadReg(ra, 1, signedness, base_ra, offset + 4 + 2);
+            try isel.emit(.orr(lo48_ra.x(), lo32_ra.x(), .{ .shifted_register = .{
+                .register = lo48_ra.x(),
+                .shift = .{ .lsl = 32 },
+            } }));
+            try isel.loadReg(lo48_ra, 2, .unsigned, base_ra, offset + 4);
+            return isel.loadReg(lo32_ra, 4, .unsigned, base_ra, offset);
+        },
+        8 => {
+            if (std.math.cast(u15, offset)) |unsigned_offset| if (unsigned_offset % 8 == 0) return isel.emit(.ldr(
+                if (ra.isVector()) ra.d() else ra.x(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.ldur(
+                if (ra.isVector()) ra.d() else ra.x(),
+                base_ra.x(),
+                signed_offset,
+            ));
+        },
+        16 => {
+            if (std.math.cast(u16, offset)) |unsigned_offset| if (unsigned_offset % 16 == 0) return isel.emit(.ldr(
+                ra.q(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.ldur(ra.q(), base_ra.x(), signed_offset));
+        },
+        else => return isel.fail("bad load size: {d}", .{size}),
+    }
+    // Offset fits neither addressing form: compute the address into a scratch
+    // register. In execution order the add/sub/movImmediate below run first,
+    // then the recursive load from [ptr_ra + 0].
+    const ptr_ra = try isel.allocIntReg();
+    defer isel.freeReg(ptr_ra);
+    try isel.loadReg(ra, size, signedness, ptr_ra, 0);
+    if (std.math.cast(u24, offset)) |pos_offset| {
+        const lo12: u12 = @truncate(pos_offset >> 0);
+        const hi12: u12 = @intCast(pos_offset >> 12);
+        if (hi12 > 0) try isel.emit(.add(
+            ptr_ra.x(),
+            if (lo12 > 0) ptr_ra.x() else base_ra.x(),
+            .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+        ));
+        if (lo12 > 0 or hi12 == 0) try isel.emit(.add(ptr_ra.x(), base_ra.x(), .{ .immediate = lo12 }));
+    } else if (std.math.cast(u24, -offset)) |neg_offset| {
+        const lo12: u12 = @truncate(neg_offset >> 0);
+        const hi12: u12 = @intCast(neg_offset >> 12);
+        if (hi12 > 0) try isel.emit(.sub(
+            ptr_ra.x(),
+            if (lo12 > 0) ptr_ra.x() else base_ra.x(),
+            .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+        ));
+        if (lo12 > 0 or hi12 == 0) try isel.emit(.sub(ptr_ra.x(), base_ra.x(), .{ .immediate = lo12 }));
+    } else {
+        // Very large offset: materialize it (i65 truncated to u64 wrapping)
+        // and add; the movImmediate executes before the add.
+        try isel.emit(.add(ptr_ra.x(), base_ra.x(), .{ .register = ptr_ra.x() }));
+        try isel.movImmediate(ptr_ra.x(), @truncate(@as(u65, @bitCast(offset))));
+    }
+}
+
+/// Stores the low `size` bytes of `ra` (general-purpose or vector) to
+/// `[base_ra + offset]`.
+/// Power-of-two sizes up to 16 use a single str/stur when the offset fits the
+/// scaled-unsigned or 9-bit-signed addressing form; sizes 3, 5, 6, and 7 are
+/// split into narrower stores, with `ubfm` extracting the high fields into
+/// scratch registers. If the offset fits neither form, the address is
+/// materialized into a scratch register and the store retried at offset 0.
+/// Note: instructions are emitted in reverse execution order — the `ubfm`
+/// extractions are emitted after the stores that consume their results.
+fn storeReg(
+    isel: *Select,
+    ra: Register.Alias,
+    size: u64,
+    base_ra: Register.Alias,
+    offset: i65,
+) !void {
+    switch (size) {
+        0 => unreachable,
+        1 => {
+            if (std.math.cast(u12, offset)) |unsigned_offset| return isel.emit(if (ra.isVector()) .str(
+                ra.b(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ) else .strb(
+                ra.w(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(if (ra.isVector())
+                .stur(ra.b(), base_ra.x(), signed_offset)
+            else
+                .sturb(ra.w(), base_ra.x(), signed_offset));
+        },
+        2 => {
+            // The scaled unsigned form requires 2-byte alignment of the offset.
+            if (std.math.cast(u13, offset)) |unsigned_offset| if (unsigned_offset % 2 == 0)
+                return isel.emit(if (ra.isVector()) .str(
+                    ra.h(),
+                    .{ .unsigned_offset = .{
+                        .base = base_ra.x(),
+                        .offset = unsigned_offset,
+                    } },
+                ) else .strh(
+                    ra.w(),
+                    .{ .unsigned_offset = .{
+                        .base = base_ra.x(),
+                        .offset = unsigned_offset,
+                    } },
+                ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(if (ra.isVector())
+                .stur(ra.h(), base_ra.x(), signed_offset)
+            else
+                .sturh(ra.w(), base_ra.x(), signed_offset));
+        },
+        // 3 bytes: low half plus byte 2 extracted via ubfm (which executes
+        // before the stores, despite being emitted last).
+        3 => {
+            const hi8_ra = try isel.allocIntReg();
+            defer isel.freeReg(hi8_ra);
+            try isel.storeReg(hi8_ra, 1, base_ra, offset + 2);
+            try isel.storeReg(ra, 2, base_ra, offset);
+            return isel.emit(.ubfm(hi8_ra.w(), ra.w(), .{
+                .N = .word,
+                .immr = 16,
+                .imms = 16 + 8 - 1,
+            }));
+        },
+        4 => {
+            if (std.math.cast(u14, offset)) |unsigned_offset| if (unsigned_offset % 4 == 0) return isel.emit(.str(
+                if (ra.isVector()) ra.s() else ra.w(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.stur(
+                if (ra.isVector()) ra.s() else ra.w(),
+                base_ra.x(),
+                signed_offset,
+            ));
+        },
+        // 5 bytes: low word plus byte 4.
+        5 => {
+            const hi8_ra = try isel.allocIntReg();
+            defer isel.freeReg(hi8_ra);
+            try isel.storeReg(hi8_ra, 1, base_ra, offset + 4);
+            try isel.storeReg(ra, 4, base_ra, offset);
+            return isel.emit(.ubfm(hi8_ra.x(), ra.x(), .{
+                .N = .doubleword,
+                .immr = 32,
+                .imms = 32 + 8 - 1,
+            }));
+        },
+        // 6 bytes: low word plus half at byte 4.
+        6 => {
+            const hi16_ra = try isel.allocIntReg();
+            defer isel.freeReg(hi16_ra);
+            try isel.storeReg(hi16_ra, 2, base_ra, offset + 4);
+            try isel.storeReg(ra, 4, base_ra, offset);
+            return isel.emit(.ubfm(hi16_ra.x(), ra.x(), .{
+                .N = .doubleword,
+                .immr = 32,
+                .imms = 32 + 16 - 1,
+            }));
+        },
+        // 7 bytes: low word, half at byte 4, byte at byte 6.
+        7 => {
+            const hi16_ra = try isel.allocIntReg();
+            defer isel.freeReg(hi16_ra);
+            const hi8_ra = try isel.allocIntReg();
+            defer isel.freeReg(hi8_ra);
+            try isel.storeReg(hi8_ra, 1, base_ra, offset + 6);
+            try isel.storeReg(hi16_ra, 2, base_ra, offset + 4);
+            try isel.storeReg(ra, 4, base_ra, offset);
+            try isel.emit(.ubfm(hi8_ra.x(), ra.x(), .{
+                .N = .doubleword,
+                .immr = 32 + 16,
+                .imms = 32 + 16 + 8 - 1,
+            }));
+            return isel.emit(.ubfm(hi16_ra.x(), ra.x(), .{
+                .N = .doubleword,
+                .immr = 32,
+                .imms = 32 + 16 - 1,
+            }));
+        },
+        8 => {
+            if (std.math.cast(u15, offset)) |unsigned_offset| if (unsigned_offset % 8 == 0) return isel.emit(.str(
+                if (ra.isVector()) ra.d() else ra.x(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.stur(
+                if (ra.isVector()) ra.d() else ra.x(),
+                base_ra.x(),
+                signed_offset,
+            ));
+        },
+        16 => {
+            if (std.math.cast(u16, offset)) |unsigned_offset| if (unsigned_offset % 16 == 0) return isel.emit(.str(
+                ra.q(),
+                .{ .unsigned_offset = .{
+                    .base = base_ra.x(),
+                    .offset = unsigned_offset,
+                } },
+            ));
+            if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.stur(ra.q(), base_ra.x(), signed_offset));
+        },
+        else => return isel.fail("bad store size: {d}", .{size}),
+    }
+    // Offset fits neither addressing form: compute the address into a scratch
+    // register. In execution order the add/sub/movImmediate below run first,
+    // then the recursive store to [ptr_ra + 0].
+    const ptr_ra = try isel.allocIntReg();
+    defer isel.freeReg(ptr_ra);
+    try isel.storeReg(ra, size, ptr_ra, 0);
+    if (std.math.cast(u24, offset)) |pos_offset| {
+        const lo12: u12 = @truncate(pos_offset >> 0);
+        const hi12: u12 = @intCast(pos_offset >> 12);
+        if (hi12 > 0) try isel.emit(.add(
+            ptr_ra.x(),
+            if (lo12 > 0) ptr_ra.x() else base_ra.x(),
+            .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+        ));
+        if (lo12 > 0 or hi12 == 0) try isel.emit(.add(ptr_ra.x(), base_ra.x(), .{ .immediate = lo12 }));
+    } else if (std.math.cast(u24, -offset)) |neg_offset| {
+        const lo12: u12 = @truncate(neg_offset >> 0);
+        const hi12: u12 = @intCast(neg_offset >> 12);
+        if (hi12 > 0) try isel.emit(.sub(
+            ptr_ra.x(),
+            if (lo12 > 0) ptr_ra.x() else base_ra.x(),
+            .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+        ));
+        if (lo12 > 0 or hi12 == 0) try isel.emit(.sub(ptr_ra.x(), base_ra.x(), .{ .immediate = lo12 }));
+    } else {
+        // Very large offset: materialize it (i65 truncated to u64 wrapping)
+        // and add; the movImmediate executes before the add.
+        try isel.emit(.add(ptr_ra.x(), base_ra.x(), .{ .register = ptr_ra.x() }));
+        try isel.movImmediate(ptr_ra.x(), @truncate(@as(u65, @bitCast(offset))));
+    }
+}
+
+/// Element type of the packed per-block dominator bit sets stored in
+/// `isel.dom` (see `fmtDom`).
+const DomInt = u8;
+
+pub const Value = struct {
+ refs: u32,
+ flags: Flags,
+ offset_from_parent: u64,
+ parent_payload: Parent.Payload,
+ location_payload: Location.Payload,
+ parts: Value.Index,
+
+ /// Must be at least 16 to compute call abi.
+ /// Must be at least 16, the largest hardware alignment.
+ pub const max_parts = 16;
+ pub const PartsLen = std.math.IntFittingRange(0, Value.max_parts);
+
+ comptime {
+ if (!std.debug.runtime_safety) assert(@sizeOf(Value) == 32);
+ }
+
+    /// Bit-packed metadata; packing the tags here keeps `Value` at 32 bytes
+    /// (checked by the `comptime` assertion above).
+    pub const Flags = packed struct(u32) {
+        alignment: InternPool.Alignment,
+        /// Active variant of `Value.parent_payload`.
+        parent_tag: Parent.Tag,
+        /// Active variant of `Value.location_payload`.
+        location_tag: Location.Tag,
+        /// Number of sub-parts minus one; 0 means no distinct parts
+        /// (see `Index.parts`).
+        parts_len_minus_one: std.math.IntFittingRange(0, Value.max_parts - 1),
+        unused: u18 = 0,
+    };
+
+    /// Where a value lives relative to other values. The active tag is stored
+    /// in `Flags.parent_tag`; the matching untagged `Payload` is stored in
+    /// `Value.parent_payload`.
+    pub const Parent = union(enum(u3)) {
+        /// Not yet assigned a home.
+        unallocated: void,
+        /// Lives in a stack slot addressed by `Indirect`.
+        stack_slot: Indirect,
+        /// NOTE(review): appears to relate this value to another value's
+        /// address — confirm exact semantics at the use sites.
+        address: Value.Index,
+        /// A sub-part of another value (see `Index.addPart`).
+        value: Value.Index,
+        /// Backed by a constant.
+        constant: Constant,
+
+        pub const Tag = @typeInfo(Parent).@"union".tag_type.?;
+        /// Untagged twin of `Parent`, so the tag can live in packed `Flags`.
+        pub const Payload = @Type(.{ .@"union" = .{
+            .layout = .auto,
+            .tag_type = null,
+            .fields = @typeInfo(Parent).@"union".fields,
+            .decls = &.{},
+        } });
+    };
+
+    /// Size and register information for a value. As with `Parent`, the tag
+    /// is stored in `Flags.location_tag` and the untagged payload in
+    /// `Value.location_payload`.
+    pub const Location = union(enum(u1)) {
+        /// Larger than register-sized; only the byte size is tracked.
+        large: struct {
+            size: u64,
+        },
+        /// Register-sized (byte size fits in u5).
+        small: struct {
+            size: u5,
+            signedness: std.builtin.Signedness,
+            is_vector: bool,
+            /// Preferred register; `.zr` means "no hint" (see `Index.hint`).
+            hint: Register.Alias,
+            /// Currently held register; `.zr` means none (see `Index.register`).
+            register: Register.Alias,
+        },
+
+        pub const Tag = @typeInfo(Location).@"union".tag_type.?;
+        /// Untagged twin of `Location`, so the tag can live in packed `Flags`.
+        pub const Payload = @Type(.{ .@"union" = .{
+            .layout = .auto,
+            .tag_type = null,
+            .fields = @typeInfo(Location).@"union".fields,
+            .decls = &.{},
+        } });
+    };
+
+    /// A memory location: base register plus a signed 25-bit byte offset.
+    pub const Indirect = packed struct(u32) {
+        base: Register.Alias,
+        offset: i25,
+
+        /// Returns a copy displaced by `offset`; the i25 addition must not
+        /// overflow.
+        pub fn withOffset(ind: Indirect, offset: i25) Indirect {
+            return .{
+                .base = ind.base,
+                .offset = ind.offset + offset,
+            };
+        }
+    };
+
+ pub const Index = enum(u32) {
+ allocating = std.math.maxInt(u32) - 1,
+ free = std.math.maxInt(u32) - 0,
+ _,
+
+ fn get(vi: Value.Index, isel: *Select) *Value {
+ return &isel.values.items[@intFromEnum(vi)];
+ }
+
+ fn setAlignment(vi: Value.Index, isel: *Select, new_alignment: InternPool.Alignment) void {
+ vi.get(isel).flags.alignment = new_alignment;
+ }
+
+ pub fn alignment(vi: Value.Index, isel: *Select) InternPool.Alignment {
+ return vi.get(isel).flags.alignment;
+ }
+
+        /// Assigns a parent to a currently-unallocated value. The `Parent`
+        /// union coerces to its tag for the packed flags field; the payload
+        /// is stored separately in `parent_payload`. If this value is already
+        /// referenced, a parent of kind `.address`/`.value` gains a reference
+        /// so it stays alive as long as this value does.
+        pub fn setParent(vi: Value.Index, isel: *Select, new_parent: Parent) void {
+            const value = vi.get(isel);
+            assert(value.flags.parent_tag == .unallocated);
+            value.flags.parent_tag = new_parent;
+            value.parent_payload = switch (new_parent) {
+                .unallocated => unreachable,
+                inline else => |payload, tag| @unionInit(Parent.Payload, @tagName(tag), payload),
+            };
+            if (value.refs > 0) switch (new_parent) {
+                .unallocated => unreachable,
+                .stack_slot, .constant => {},
+                .address, .value => |parent_vi| _ = parent_vi.ref(isel),
+            };
+        }
+
+ pub fn changeStackSlot(vi: Value.Index, isel: *Select, new_stack_slot: Indirect) void {
+ const value = vi.get(isel);
+ assert(value.flags.parent_tag == .stack_slot);
+ value.flags.parent_tag = .unallocated;
+ vi.setParent(isel, .{ .stack_slot = new_stack_slot });
+ }
+
+ pub fn parent(vi: Value.Index, isel: *Select) Parent {
+ const value = vi.get(isel);
+ return switch (value.flags.parent_tag) {
+ inline else => |tag| @unionInit(
+ Parent,
+ @tagName(tag),
+ @field(value.parent_payload, @tagName(tag)),
+ ),
+ };
+ }
+
+        /// Walks `.value` parent links to the root, accumulating the byte
+        /// offset of `initial_vi` within it. Returns `.{ offset, root_vi }`
+        /// where `root_vi`'s parent is anything other than `.value`.
+        pub fn valueParent(initial_vi: Value.Index, isel: *Select) struct { u64, Value.Index } {
+            var offset: u64 = 0;
+            var vi = initial_vi;
+            // Labeled-switch loop: `continue :parent` re-dispatches on the
+            // next level's parent.
+            parent: switch (vi.parent(isel)) {
+                else => return .{ offset, vi },
+                .value => |parent_vi| {
+                    offset += vi.position(isel)[0];
+                    vi = parent_vi;
+                    continue :parent parent_vi.parent(isel);
+                },
+            }
+        }
+
+ pub fn location(vi: Value.Index, isel: *Select) Location {
+ const value = vi.get(isel);
+ return switch (value.flags.location_tag) {
+ inline else => |tag| @unionInit(
+ Location,
+ @tagName(tag),
+ @field(value.location_payload, @tagName(tag)),
+ ),
+ };
+ }
+
+ pub fn position(vi: Value.Index, isel: *Select) struct { u64, u64 } {
+ return .{ vi.get(isel).offset_from_parent, vi.size(isel) };
+ }
+
+ pub fn size(vi: Value.Index, isel: *Select) u64 {
+ return switch (vi.location(isel)) {
+ inline else => |loc| loc.size,
+ };
+ }
+
+ fn setHint(vi: Value.Index, isel: *Select, new_hint: Register.Alias) void {
+ vi.get(isel).location_payload.small.hint = new_hint;
+ }
+
+ pub fn hint(vi: Value.Index, isel: *Select) ?Register.Alias {
+ return switch (vi.location(isel)) {
+ .large => null,
+ .small => |loc| switch (loc.hint) {
+ .zr => null,
+ else => |hint_reg| hint_reg,
+ },
+ };
+ }
+
+ fn setSignedness(vi: Value.Index, isel: *Select, new_signedness: std.builtin.Signedness) void {
+ const value = vi.get(isel);
+ assert(value.location_payload.small.size <= 2);
+ value.location_payload.small.signedness = new_signedness;
+ }
+
+ pub fn signedness(vi: Value.Index, isel: *Select) std.builtin.Signedness {
+ const value = vi.get(isel);
+ return switch (value.flags.location_tag) {
+ .large => .unsigned,
+ .small => value.location_payload.small.signedness,
+ };
+ }
+
+ fn setIsVector(vi: Value.Index, isel: *Select) void {
+ const is_vector = &vi.get(isel).location_payload.small.is_vector;
+ assert(!is_vector.*);
+ is_vector.* = true;
+ }
+
+ pub fn isVector(vi: Value.Index, isel: *Select) bool {
+ const value = vi.get(isel);
+ return switch (value.flags.location_tag) {
+ .large => false,
+ .small => value.location_payload.small.is_vector,
+ };
+ }
+
+        /// The register currently holding this value, or `null` when it is
+        /// not in a register (`.zr` is the "no register" sentinel).
+        pub fn register(vi: Value.Index, isel: *Select) ?Register.Alias {
+            const payload = switch (vi.location(isel)) {
+                .large => return null,
+                .small => |small_loc| small_loc,
+            };
+            return if (payload.register == .zr) null else payload.register;
+        }
+
+        /// A value counts as used when its root parent has been allocated
+        /// (is no longer `.unallocated`), or when it or any of its parts
+        /// currently occupies a register.
+        pub fn isUsed(vi: Value.Index, isel: *Select) bool {
+            return vi.valueParent(isel)[1].parent(isel) != .unallocated or vi.hasRegisterRecursive(isel);
+        }
+
+        /// Whether this value, or (recursively) any part of it, currently
+        /// sits in a register.
+        fn hasRegisterRecursive(vi: Value.Index, isel: *Select) bool {
+            if (vi.register(isel)) |_| return true;
+            var part_it = vi.parts(isel);
+            // `only() == null` means the value actually has multiple parts.
+            if (part_it.only() == null) while (part_it.next()) |part_vi| if (part_vi.hasRegisterRecursive(isel)) return true;
+            return false;
+        }
+
+        /// Splits this value into `parts_len` parts. The parts are the
+        /// values about to be appended to `isel.values` (their starting
+        /// index is the current length). Asserts the value was not split
+        /// before.
+        fn setParts(vi: Value.Index, isel: *Select, parts_len: Value.PartsLen) void {
+            assert(parts_len > 1);
+            const value = vi.get(isel);
+            assert(value.flags.parts_len_minus_one == 0);
+            value.parts = @enumFromInt(isel.values.items.len);
+            value.flags.parts_len_minus_one = @intCast(parts_len - 1);
+        }
+
+        /// Appends a new part value spanning `part_offset`/`part_size`
+        /// within `vi`, inheriting `vi`'s alignment, and links it to `vi`
+        /// as its parent.
+        fn addPart(vi: Value.Index, isel: *Select, part_offset: u64, part_size: u64) Value.Index {
+            const part_vi = isel.initValueAdvanced(vi.alignment(isel), part_offset, part_size);
+            tracking_log.debug("${d} <- ${d}[{d}]", .{
+                @intFromEnum(part_vi),
+                @intFromEnum(vi),
+                part_offset,
+            });
+            part_vi.setParent(isel, .{ .value = vi });
+            return part_vi;
+        }
+
+        /// Iterates this value's parts. A value that was never split
+        /// yields itself as its only part.
+        pub fn parts(vi: Value.Index, isel: *Select) Value.PartIterator {
+            const value = vi.get(isel);
+            return switch (value.flags.parts_len_minus_one) {
+                0 => .initOne(vi),
+                else => |parts_len_minus_one| .{
+                    .vi = value.parts,
+                    .remaining = @as(Value.PartsLen, parts_len_minus_one) + 1,
+                },
+            };
+        }
+
+        /// Returns an iterator over the contiguous run of parts of `vi`
+        /// covering the byte range `[part_offset, part_offset + part_size)`.
+        fn containingParts(vi: Value.Index, isel: *Select, part_offset: u64, part_size: u64) Value.PartIterator {
+            const start_vi = vi.partAtOffset(isel, part_offset);
+            const start_offset, const start_size = start_vi.position(isel);
+            // The range is contained in this single part only when it also
+            // ends no later than the part does. The previous check
+            // (`part_size <= start_size`) wrongly accepted ranges that
+            // start inside the part but run past its end (e.g. part
+            // [0, 8), requested range [4, 12)).
+            if (part_offset >= start_offset and
+                part_offset + part_size <= start_offset + start_size) return .initOne(start_vi);
+            const end_vi = vi.partAtOffset(isel, part_size - 1 + part_offset);
+            return .{
+                .vi = start_vi,
+                // Parts are stored at consecutive indices, so the index
+                // distance is the part count.
+                .remaining = @intCast(@intFromEnum(end_vi) - @intFromEnum(start_vi) + 1),
+            };
+        }
+        comptime {
+            // Keep this currently-unused helper semantically analyzed.
+            _ = containingParts;
+        }
+
+        /// Binary searches this value's parts for the one whose byte range
+        /// contains `offset`. Returns `vi` itself when it has no parts.
+        fn partAtOffset(vi: Value.Index, isel: *Select, offset: u64) Value.Index {
+            const SearchPartIndex = std.math.IntFittingRange(0, Value.max_parts * 2 - 1);
+            const value = vi.get(isel);
+            var last: SearchPartIndex = value.flags.parts_len_minus_one;
+            if (last == 0) return vi;
+            var first: SearchPartIndex = 0;
+            last += 1;
+            // Parts are stored contiguously, sorted by offset; find the
+            // last part whose `offset_from_parent` is <= `offset`.
+            while (true) {
+                const mid = (first + last) / 2;
+                const mid_vi: Value.Index = @enumFromInt(@intFromEnum(value.parts) + mid);
+                if (mid == first) return mid_vi;
+                if (offset < mid_vi.get(isel).offset_from_parent) last = mid else first = mid;
+            }
+        }
+
+        /// Creates an iterator over the parts of `vi` (whose Zig type is
+        /// `ty`) that overlap the byte range
+        /// `[field_offset, field_offset + field_size)`. The iterator
+        /// splits values into parts on demand as it advances.
+        fn field(
+            vi: Value.Index,
+            ty: ZigType,
+            field_offset: u64,
+            field_size: u64,
+        ) Value.FieldPartIterator {
+            assert(field_size > 0);
+            return .{
+                .vi = vi,
+                .ty = ty,
+                .field_offset = field_offset,
+                .field_size = field_size,
+                .next_offset = 0,
+            };
+        }
+
+        /// Increments the reference count of `initial_vi` and, when it
+        /// first becomes referenced, of each ancestor reached through
+        /// `.address`/`.value` parent links, stopping at the first
+        /// ancestor that was already referenced. Returns `initial_vi` for
+        /// call chaining.
+        fn ref(initial_vi: Value.Index, isel: *Select) Value.Index {
+            var vi = initial_vi;
+            while (true) {
+                const refs = &vi.get(isel).refs;
+                refs.* += 1;
+                // Ancestors were already counted when this value first
+                // became referenced.
+                if (refs.* > 1) return initial_vi;
+                switch (vi.parent(isel)) {
+                    .unallocated, .stack_slot, .constant => {},
+                    .address, .value => |parent_vi| {
+                        vi = parent_vi;
+                        continue;
+                    },
+                }
+                return initial_vi;
+            }
+        }
+
+        /// Decrements the reference count of `initial_vi` and, when it
+        /// drops to zero, of each ancestor reached through
+        /// `.address`/`.value` parent links. Mirror of `ref`.
+        pub fn deref(initial_vi: Value.Index, isel: *Select) void {
+            var vi = initial_vi;
+            while (true) {
+                const refs = &vi.get(isel).refs;
+                refs.* -= 1;
+                if (refs.* > 0) return;
+                switch (vi.parent(isel)) {
+                    .unallocated, .constant => {},
+                    .stack_slot => {
+                        // TODO: reuse stack slot
+                    },
+                    .address, .value => |parent_vi| {
+                        vi = parent_vi;
+                        continue;
+                    },
+                }
+                return;
+            }
+        }
+
+        /// Copies the value of the AIR operand `src_ref` into `dst_vi`.
+        fn move(dst_vi: Value.Index, isel: *Select, src_ref: Air.Inst.Ref) !void {
+            try dst_vi.copy(
+                isel,
+                isel.air.typeOf(src_ref, &isel.pt.zcu.intern_pool),
+                try isel.use(src_ref),
+            );
+        }
+
+        /// Copies `src_vi` into `dst_vi`, both of type `ty`, starting the
+        /// recursive part-wise copy at offset 0 of both roots.
+        fn copy(dst_vi: Value.Index, isel: *Select, ty: ZigType, src_vi: Value.Index) !void {
+            try dst_vi.copyAdvanced(isel, src_vi, .{
+                .ty = ty,
+                .dst_vi = dst_vi,
+                .dst_offset = 0,
+                .src_vi = src_vi,
+                .src_offset = 0,
+            });
+        }
+
+        /// Part-wise recursive copy of `src_vi` into `dst_vi`. `root`
+        /// carries the original destination/source values of type `ty` and
+        /// the offsets of `dst_vi`/`src_vi` within them, so whichever side
+        /// is split more finely can look up the matching field range on
+        /// the other side.
+        fn copyAdvanced(dst_vi: Value.Index, isel: *Select, src_vi: Value.Index, root: struct {
+            ty: ZigType,
+            dst_vi: Value.Index,
+            dst_offset: u64,
+            src_vi: Value.Index,
+            src_offset: u64,
+        }) !void {
+            if (dst_vi == src_vi) return;
+            var dst_part_it = dst_vi.parts(isel);
+            if (dst_part_it.only()) |dst_part_vi| {
+                var src_part_it = src_vi.parts(isel);
+                if (src_part_it.only()) |src_part_vi| {
+                    // Leaf-to-leaf: materialize the source into the
+                    // destination's register; bail if the destination is
+                    // unused (`defReg` returned null).
+                    try src_part_vi.liveOut(isel, try dst_part_vi.defReg(isel) orelse return);
+                } else while (src_part_it.next()) |src_part_vi| {
+                    // Source is split more finely: recurse per source part.
+                    const src_part_offset, const src_part_size = src_part_vi.position(isel);
+                    var dst_field_it = root.dst_vi.field(root.ty, root.dst_offset + src_part_offset, src_part_size);
+                    const dst_field_vi = try dst_field_it.only(isel);
+                    try dst_field_vi.?.copyAdvanced(isel, src_part_vi, .{
+                        .ty = root.ty,
+                        .dst_vi = root.dst_vi,
+                        .dst_offset = root.dst_offset + src_part_offset,
+                        .src_vi = root.src_vi,
+                        .src_offset = root.src_offset + src_part_offset,
+                    });
+                }
+            } else while (dst_part_it.next()) |dst_part_vi| {
+                // Destination is split more finely: recurse per dest part.
+                const dst_part_offset, const dst_part_size = dst_part_vi.position(isel);
+                var src_field_it = root.src_vi.field(root.ty, root.src_offset + dst_part_offset, dst_part_size);
+                const src_part_vi = try src_field_it.only(isel);
+                try dst_part_vi.copyAdvanced(isel, src_part_vi.?, .{
+                    .ty = root.ty,
+                    .dst_vi = root.dst_vi,
+                    .dst_offset = root.dst_offset + dst_part_offset,
+                    .src_vi = root.src_vi,
+                    .src_offset = root.src_offset + dst_part_offset,
+                });
+            }
+        }
+
+        /// Options controlling `addOrSubtract`.
+        const AddOrSubtractOptions = struct {
+            overflow: Overflow,
+
+            /// How integer overflow of the operation is handled.
+            const Overflow = union(enum) {
+                /// Caller guarantees overflow cannot occur; no check emitted.
+                @"unreachable",
+                /// Emit the given safety panic when overflow occurs.
+                panic: Zcu.SimplePanicId,
+                /// Two's-complement wrapping; overflow is not detected.
+                wrap,
+                /// Write a 0/1 overflow flag into the given register.
+                ra: Register.Alias,
+
+                /// Emits the overflow handling for the case where condition
+                /// `cond` being true means "overflowed".
+                fn defCond(overflow: Overflow, isel: *Select, cond: codegen.aarch64.encoding.ConditionCode) !void {
+                    switch (overflow) {
+                        .@"unreachable" => unreachable,
+                        .panic => |panic_id| {
+                            // NOTE: instructions appear to be emitted in
+                            // reverse program order (the branch displacement
+                            // counts instructions emitted since `skip_label`,
+                            // times 4 bytes), so this inverted conditional
+                            // branch ends up before the panic sequence and
+                            // skips it when `cond` does not hold.
+                            const skip_label = isel.instructions.items.len;
+                            try isel.emitPanic(panic_id);
+                            try isel.emit(.@"b."(
+                                cond.invert(),
+                                @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+                            ));
+                        },
+                        .wrap => {},
+                        // csinc with inverted condition: 1 when `cond` held
+                        // (overflow), 0 otherwise.
+                        .ra => |overflow_ra| try isel.emit(.csinc(overflow_ra.w(), .wzr, .wzr, cond.invert())),
+                    }
+                }
+            };
+        };
+        /// Emits a (possibly multi-limb) integer add or subtract of
+        /// `lhs_vi` and `rhs_vi` into `res_vi`, with overflow handling per
+        /// `opts.overflow`. Supports ABI integers up to 128 bits.
+        ///
+        /// Limbs are processed from most significant to least; since
+        /// instructions appear to be emitted in reverse program order (see
+        /// `Overflow.defCond`), execution runs low limb first with the
+        /// carry propagating upward via adc(s)/sbc(s).
+        fn addOrSubtract(
+            res_vi: Value.Index,
+            isel: *Select,
+            ty: ZigType,
+            lhs_vi: Value.Index,
+            op: codegen.aarch64.encoding.Instruction.AddSubtractOp,
+            rhs_vi: Value.Index,
+            opts: AddOrSubtractOptions,
+        ) !void {
+            const zcu = isel.pt.zcu;
+            if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(op), isel.fmtType(ty) });
+            const int_info = ty.intInfo(zcu);
+            if (int_info.bits > 128) return isel.fail("too big {s} {f}", .{ @tagName(op), isel.fmtType(ty) });
+            var part_offset = res_vi.size(isel);
+            // Wrapping/overflow detection only applies to the most
+            // significant limb (the first one processed); cleared by the
+            // loop continuation expression.
+            var need_wrap = switch (opts.overflow) {
+                .@"unreachable" => false,
+                .panic, .wrap, .ra => true,
+            };
+            // Whether flag-setting instruction forms are required.
+            var need_carry = switch (opts.overflow) {
+                .@"unreachable", .wrap => false,
+                .panic, .ra => true,
+            };
+            while (part_offset > 0) : (need_wrap = false) {
+                const part_size = @min(part_offset, 8);
+                part_offset -= part_size;
+                var wrapped_res_part_it = res_vi.field(ty, part_offset, part_size);
+                const wrapped_res_part_vi = try wrapped_res_part_it.only(isel);
+                // `.zr` discards the result while still setting flags when
+                // a carry/overflow is needed; otherwise skip unused limbs.
+                const wrapped_res_part_ra = try wrapped_res_part_vi.?.defReg(isel) orelse if (need_carry) .zr else continue;
+                const unwrapped_res_part_ra = unwrapped_res_part_ra: {
+                    if (!need_wrap) break :unwrapped_res_part_ra wrapped_res_part_ra;
+                    if (int_info.bits % 32 == 0) {
+                        // Exact register width: overflow comes straight from
+                        // the PSTATE flags (V for signed, C for unsigned).
+                        try opts.overflow.defCond(isel, switch (int_info.signedness) {
+                            .signed => .vs,
+                            .unsigned => switch (op) {
+                                .add => .cs,
+                                .sub => .cc,
+                            },
+                        });
+                        break :unwrapped_res_part_ra wrapped_res_part_ra;
+                    }
+                    // Non-register-width type: overflow is detected from
+                    // the unwrapped vs wrapped result instead of flags.
+                    need_carry = false;
+                    const wrapped_part_ra, const unwrapped_part_ra = part_ra: switch (opts.overflow) {
+                        .@"unreachable" => unreachable,
+                        .panic, .ra => switch (int_info.signedness) {
+                            .signed => {
+                                // Signed: overflow iff sign-extending the
+                                // truncated result differs from the
+                                // unwrapped result (compared via subs).
+                                try opts.overflow.defCond(isel, .ne);
+                                const wrapped_part_ra = switch (wrapped_res_part_ra) {
+                                    else => |res_part_ra| res_part_ra,
+                                    .zr => try isel.allocIntReg(),
+                                };
+                                errdefer if (wrapped_part_ra != wrapped_res_part_ra) isel.freeReg(wrapped_part_ra);
+                                const unwrapped_part_ra = unwrapped_part_ra: {
+                                    const wrapped_res_part_lock: RegLock = switch (wrapped_res_part_ra) {
+                                        else => |res_part_ra| isel.lockReg(res_part_ra),
+                                        .zr => .empty,
+                                    };
+                                    defer wrapped_res_part_lock.unlock(isel);
+                                    break :unwrapped_part_ra try isel.allocIntReg();
+                                };
+                                errdefer isel.freeReg(unwrapped_part_ra);
+                                switch (part_size) {
+                                    else => unreachable,
+                                    1...4 => try isel.emit(.subs(.wzr, wrapped_part_ra.w(), .{ .register = unwrapped_part_ra.w() })),
+                                    5...8 => try isel.emit(.subs(.xzr, wrapped_part_ra.x(), .{ .register = unwrapped_part_ra.x() })),
+                                }
+                                break :part_ra .{ wrapped_part_ra, unwrapped_part_ra };
+                            },
+                            .unsigned => {
+                                // Unsigned: overflow iff any bit at or above
+                                // `int_info.bits` is set in the unwrapped
+                                // result; only the lowest such bit needs
+                                // testing after a single add/sub.
+                                const unwrapped_part_ra = unwrapped_part_ra: {
+                                    const wrapped_res_part_lock: RegLock = switch (wrapped_res_part_ra) {
+                                        else => |res_part_ra| isel.lockReg(res_part_ra),
+                                        .zr => .empty,
+                                    };
+                                    defer wrapped_res_part_lock.unlock(isel);
+                                    break :unwrapped_part_ra try isel.allocIntReg();
+                                };
+                                errdefer isel.freeReg(unwrapped_part_ra);
+                                const bit: u6 = @truncate(int_info.bits);
+                                switch (opts.overflow) {
+                                    .@"unreachable", .wrap => unreachable,
+                                    .panic => |panic_id| {
+                                        // tbz skips the panic when the
+                                        // overflow bit is clear (reverse
+                                        // emission, as in defCond).
+                                        const skip_label = isel.instructions.items.len;
+                                        try isel.emitPanic(panic_id);
+                                        try isel.emit(.tbz(
+                                            switch (bit) {
+                                                0, 32 => unreachable,
+                                                1...31 => unwrapped_part_ra.w(),
+                                                33...63 => unwrapped_part_ra.x(),
+                                            },
+                                            bit,
+                                            @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+                                        ));
+                                    },
+                                    // Extract the overflow bit into the flag
+                                    // register (ubfm with immr == imms pulls
+                                    // out a single bit).
+                                    .ra => |overflow_ra| try isel.emit(switch (bit) {
+                                        0, 32 => unreachable,
+                                        1...31 => .ubfm(overflow_ra.w(), unwrapped_part_ra.w(), .{
+                                            .N = .word,
+                                            .immr = bit,
+                                            .imms = bit,
+                                        }),
+                                        33...63 => .ubfm(overflow_ra.x(), unwrapped_part_ra.x(), .{
+                                            .N = .doubleword,
+                                            .immr = bit,
+                                            .imms = bit,
+                                        }),
+                                    }),
+                                }
+                                break :part_ra .{ wrapped_res_part_ra, unwrapped_part_ra };
+                            },
+                        },
+                        .wrap => .{ wrapped_res_part_ra, wrapped_res_part_ra },
+                    };
+                    defer if (wrapped_part_ra != wrapped_res_part_ra) isel.freeReg(wrapped_part_ra);
+                    errdefer if (unwrapped_part_ra != wrapped_res_part_ra) isel.freeReg(unwrapped_part_ra);
+                    // Narrow the unwrapped result to the type's bit width:
+                    // sbfm sign-extends, ubfm zero-extends.
+                    if (wrapped_part_ra != .zr) try isel.emit(switch (part_size) {
+                        else => unreachable,
+                        1...4 => switch (int_info.signedness) {
+                            .signed => .sbfm(wrapped_part_ra.w(), unwrapped_part_ra.w(), .{
+                                .N = .word,
+                                .immr = 0,
+                                .imms = @truncate(int_info.bits - 1),
+                            }),
+                            .unsigned => .ubfm(wrapped_part_ra.w(), unwrapped_part_ra.w(), .{
+                                .N = .word,
+                                .immr = 0,
+                                .imms = @truncate(int_info.bits - 1),
+                            }),
+                        },
+                        5...8 => switch (int_info.signedness) {
+                            .signed => .sbfm(wrapped_part_ra.x(), unwrapped_part_ra.x(), .{
+                                .N = .doubleword,
+                                .immr = 0,
+                                .imms = @truncate(int_info.bits - 1),
+                            }),
+                            .unsigned => .ubfm(wrapped_part_ra.x(), unwrapped_part_ra.x(), .{
+                                .N = .doubleword,
+                                .immr = 0,
+                                .imms = @truncate(int_info.bits - 1),
+                            }),
+                        },
+                    });
+                    break :unwrapped_res_part_ra unwrapped_part_ra;
+                };
+                defer if (unwrapped_res_part_ra != wrapped_res_part_ra) isel.freeReg(unwrapped_res_part_ra);
+                var lhs_part_it = lhs_vi.field(ty, part_offset, part_size);
+                const lhs_part_vi = try lhs_part_it.only(isel);
+                const lhs_part_mat = try lhs_part_vi.?.matReg(isel);
+                var rhs_part_it = rhs_vi.field(ty, part_offset, part_size);
+                const rhs_part_vi = try rhs_part_it.only(isel);
+                const rhs_part_mat = try rhs_part_vi.?.matReg(isel);
+                // Lowest limb (part_offset == 0) uses add/sub; higher limbs
+                // consume the carry via adc/sbc. Flag-setting forms are
+                // used whenever a later-executing limb needs the carry.
+                try isel.emit(switch (part_size) {
+                    else => unreachable,
+                    1...4 => switch (op) {
+                        .add => switch (part_offset) {
+                            0 => switch (need_carry) {
+                                false => .add(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+                                true => .adds(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+                            },
+                            else => switch (need_carry) {
+                                false => .adc(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+                                true => .adcs(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+                            },
+                        },
+                        .sub => switch (part_offset) {
+                            0 => switch (need_carry) {
+                                false => .sub(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+                                true => .subs(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+                            },
+                            else => switch (need_carry) {
+                                false => .sbc(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+                                true => .sbcs(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+                            },
+                        },
+                    },
+                    5...8 => switch (op) {
+                        .add => switch (part_offset) {
+                            0 => switch (need_carry) {
+                                false => .add(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+                                true => .adds(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+                            },
+                            else => switch (need_carry) {
+                                false => .adc(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+                                true => .adcs(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+                            },
+                        },
+                        .sub => switch (part_offset) {
+                            0 => switch (need_carry) {
+                                false => .sub(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+                                true => .subs(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+                            },
+                            else => switch (need_carry) {
+                                false => .sbc(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+                                true => .sbcs(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+                            },
+                        },
+                    },
+                });
+                try rhs_part_mat.finish(isel);
+                try lhs_part_mat.finish(isel);
+                // Any limb below the top one must propagate a carry.
+                need_carry = true;
+            }
+        }
+
+        /// Options shared by `load` and `store`.
+        const MemoryAccessOptions = struct {
+            /// Root value that `offset` is relative to; `.free` means the
+            /// accessed value itself is the root.
+            root_vi: Value.Index = .free,
+            /// Byte offset from `base_ra` of the (sub)value being accessed.
+            offset: u64 = 0,
+            /// Volatile loads are performed (into `.zr`) even when the
+            /// result is unused.
+            @"volatile": bool = false,
+            /// Whether values too large for one register may be split into
+            /// parts on demand.
+            split: bool = true,
+            /// When set, the final part is sign/zero-extended from this
+            /// integer type's bit width.
+            wrap: ?std.builtin.Type.Int = null,
+            /// Registers that are expected to remain live after the access.
+            expected_live_registers: *const LiveRegisters = &.initFill(.free),
+        };
+
+        /// Loads this value (part-wise) from memory at `base_ra` +
+        /// `opts.offset`. Returns whether any load was actually emitted
+        /// (unused, non-volatile parts are skipped).
+        fn load(
+            vi: Value.Index,
+            isel: *Select,
+            root_ty: ZigType,
+            base_ra: Register.Alias,
+            opts: MemoryAccessOptions,
+        ) !bool {
+            const root_vi = switch (opts.root_vi) {
+                _ => |root_vi| root_vi,
+                .allocating => unreachable,
+                .free => vi,
+            };
+            var part_it = vi.parts(isel);
+            if (part_it.only()) |part_vi| only: {
+                const part_size = part_vi.size(isel);
+                const part_is_vector = part_vi.isVector(isel);
+                if (part_size > @as(@TypeOf(part_size), if (part_is_vector) 16 else 8)) {
+                    // Too large for a single register: force the value to
+                    // be split (requesting a strictly smaller field does
+                    // the split), then fall through to the per-part loop.
+                    if (!opts.split) return false;
+                    var subpart_it = root_vi.field(root_ty, opts.offset, part_size - 1);
+                    _ = try subpart_it.next(isel);
+                    part_it = vi.parts(isel);
+                    assert(part_it.only() == null);
+                    break :only;
+                }
+                // `.zr` keeps a volatile load without keeping its result.
+                const part_ra = if (try part_vi.defReg(isel)) |part_ra|
+                    part_ra
+                else if (opts.@"volatile")
+                    .zr
+                else
+                    return false;
+                if (part_ra != .zr) {
+                    const live_vi = isel.live_registers.getPtr(part_ra);
+                    assert(live_vi.* == .free);
+                    live_vi.* = .allocating;
+                }
+                // Sign/zero-extend odd bit widths; emitted before the load
+                // below, which under reverse-order emission executes after
+                // it. Widths that match the load size need no fixup.
+                if (opts.wrap) |int_info| switch (int_info.bits) {
+                    else => unreachable,
+                    1...7, 9...15, 17...31 => |bits| try isel.emit(switch (int_info.signedness) {
+                        .signed => .sbfm(part_ra.w(), part_ra.w(), .{
+                            .N = .word,
+                            .immr = 0,
+                            .imms = @intCast(bits - 1),
+                        }),
+                        .unsigned => .ubfm(part_ra.w(), part_ra.w(), .{
+                            .N = .word,
+                            .immr = 0,
+                            .imms = @intCast(bits - 1),
+                        }),
+                    }),
+                    8, 16, 32 => {},
+                    33...63 => |bits| try isel.emit(switch (int_info.signedness) {
+                        .signed => .sbfm(part_ra.x(), part_ra.x(), .{
+                            .N = .doubleword,
+                            .immr = 0,
+                            .imms = @intCast(bits - 1),
+                        }),
+                        .unsigned => .ubfm(part_ra.x(), part_ra.x(), .{
+                            .N = .doubleword,
+                            .immr = 0,
+                            .imms = @intCast(bits - 1),
+                        }),
+                    }),
+                    64 => {},
+                };
+                try isel.loadReg(part_ra, part_size, part_vi.signedness(isel), base_ra, opts.offset);
+                if (part_ra != .zr) {
+                    const live_vi = isel.live_registers.getPtr(part_ra);
+                    assert(live_vi.* == .allocating);
+                    // Release the register unless the caller expects it to
+                    // stay live across this access.
+                    switch (opts.expected_live_registers.get(part_ra)) {
+                        _ => {},
+                        .allocating => unreachable,
+                        .free => live_vi.* = .free,
+                    }
+                }
+                return true;
+            }
+            var used = false;
+            // `wrap` only applies to the last (most significant) part; its
+            // bit count is rebased onto that part's offset.
+            while (part_it.next()) |part_vi| used |= try part_vi.load(isel, root_ty, base_ra, .{
+                .root_vi = root_vi,
+                .offset = opts.offset + part_vi.get(isel).offset_from_parent,
+                .@"volatile" = opts.@"volatile",
+                .split = opts.split,
+                .wrap = switch (part_it.remaining) {
+                    else => null,
+                    0 => if (opts.wrap) |wrap| .{
+                        .signedness = wrap.signedness,
+                        .bits = @intCast(wrap.bits - 8 * part_vi.position(isel)[0]),
+                    } else null,
+                },
+                .expected_live_registers = opts.expected_live_registers,
+            });
+            return used;
+        }
+
+        /// Stores this value (part-wise) to memory at `base_ra` +
+        /// `opts.offset`, materializing each leaf part into a register
+        /// first. Counterpart of `load`.
+        fn store(
+            vi: Value.Index,
+            isel: *Select,
+            root_ty: ZigType,
+            base_ra: Register.Alias,
+            opts: MemoryAccessOptions,
+        ) !void {
+            const root_vi = switch (opts.root_vi) {
+                _ => |root_vi| root_vi,
+                .allocating => unreachable,
+                .free => vi,
+            };
+            var part_it = vi.parts(isel);
+            if (part_it.only()) |part_vi| only: {
+                const part_size = part_vi.size(isel);
+                const part_is_vector = part_vi.isVector(isel);
+                if (part_size > @as(@TypeOf(part_size), if (part_is_vector) 16 else 8)) {
+                    // Too large for a single register: force a split (by
+                    // requesting a strictly smaller field) and fall through
+                    // to the per-part loop below.
+                    if (!opts.split) return;
+                    var subpart_it = root_vi.field(root_ty, opts.offset, part_size - 1);
+                    _ = try subpart_it.next(isel);
+                    part_it = vi.parts(isel);
+                    assert(part_it.only() == null);
+                    break :only;
+                }
+                const part_mat = try part_vi.matReg(isel);
+                try isel.storeReg(part_mat.ra, part_size, base_ra, opts.offset);
+                return part_mat.finish(isel);
+            }
+            // `wrap` is forwarded to the last (most significant) part only,
+            // rebased onto that part's offset.
+            while (part_it.next()) |part_vi| try part_vi.store(isel, root_ty, base_ra, .{
+                .root_vi = root_vi,
+                .offset = opts.offset + part_vi.get(isel).offset_from_parent,
+                .@"volatile" = opts.@"volatile",
+                .split = opts.split,
+                .wrap = switch (part_it.remaining) {
+                    else => null,
+                    0 => if (opts.wrap) |wrap| .{
+                        .signedness = wrap.signedness,
+                        .bits = @intCast(wrap.bits - 8 * part_vi.position(isel)[0]),
+                    } else null,
+                },
+                .expected_live_registers = opts.expected_live_registers,
+            });
+        }
+
+        /// Ensures this value can be materialized later. The register
+        /// caching path in the first branch is currently disabled
+        /// (`if (false)`); in practice this only makes sure the root
+        /// parent has a stack slot allocated.
+        fn mat(vi: Value.Index, isel: *Select) !void {
+            if (false) {
+                // Disabled: try to park each part in a register (its own,
+                // its hint, or a freshly allocated one), falling back to a
+                // stack slot when registers run out.
+                var part_it: Value.PartIterator = if (vi.size(isel) > 8) vi.parts(isel) else .initOne(vi);
+                if (part_it.only()) |part_vi| only: {
+                    const mat_ra = mat_ra: {
+                        if (part_vi.register(isel)) |mat_ra| {
+                            part_vi.get(isel).location_payload.small.register = .zr;
+                            const live_vi = isel.live_registers.getPtr(mat_ra);
+                            assert(live_vi.* == part_vi);
+                            live_vi.* = .allocating;
+                            break :mat_ra mat_ra;
+                        }
+                        if (part_vi.hint(isel)) |hint_ra| {
+                            const live_vi = isel.live_registers.getPtr(hint_ra);
+                            if (live_vi.* == .free) {
+                                live_vi.* = .allocating;
+                                isel.saved_registers.insert(hint_ra);
+                                break :mat_ra hint_ra;
+                            }
+                        }
+                        const part_size = part_vi.size(isel);
+                        const part_is_vector = part_vi.isVector(isel);
+                        if (part_size <= @as(@TypeOf(part_size), if (part_is_vector) 16 else 8))
+                            switch (if (part_is_vector) isel.tryAllocVecReg() else isel.tryAllocIntReg()) {
+                                .allocated => |ra| break :mat_ra ra,
+                                .fill_candidate, .out_of_registers => {},
+                            };
+                        _, const parent_vi = vi.valueParent(isel);
+                        switch (parent_vi.parent(isel)) {
+                            .unallocated => parent_vi.setParent(isel, .{ .stack_slot = parent_vi.allocStackSlot(isel) }),
+                            else => {},
+                        }
+                        break :only;
+                    };
+                    assert(isel.live_registers.get(mat_ra) == .allocating);
+                    try Value.Materialize.finish(.{ .vi = part_vi, .ra = mat_ra }, isel);
+                } else while (part_it.next()) |part_vi| try part_vi.mat(isel);
+            } else {
+                // Active path: give the root parent a stack slot if it does
+                // not have a home yet.
+                _, const parent_vi = vi.valueParent(isel);
+                switch (parent_vi.parent(isel)) {
+                    .unallocated => parent_vi.setParent(isel, .{ .stack_slot = parent_vi.allocStackSlot(isel) }),
+                    else => {},
+                }
+            }
+        }
+
+        /// Picks a register to materialize this value in and returns a
+        /// `Materialize` handle (finish it with `Materialize.finish`).
+        /// Preference order: the value's current register (which is taken
+        /// over and marked `.allocating`), then its free hint register,
+        /// then a freshly allocated register of the appropriate class.
+        fn matReg(vi: Value.Index, isel: *Select) !Value.Materialize {
+            const mat_ra = mat_ra: {
+                if (vi.register(isel)) |mat_ra| {
+                    vi.get(isel).location_payload.small.register = .zr;
+                    const live_vi = isel.live_registers.getPtr(mat_ra);
+                    assert(live_vi.* == vi);
+                    live_vi.* = .allocating;
+                    break :mat_ra mat_ra;
+                }
+                if (vi.hint(isel)) |hint_ra| {
+                    const live_vi = isel.live_registers.getPtr(hint_ra);
+                    if (live_vi.* == .free) {
+                        live_vi.* = .allocating;
+                        // Record the hint register as touched; presumably so
+                        // it gets saved/restored — TODO confirm.
+                        isel.saved_registers.insert(hint_ra);
+                        break :mat_ra hint_ra;
+                    }
+                }
+                break :mat_ra if (vi.isVector(isel)) try isel.allocVecReg() else try isel.allocIntReg();
+            };
+            assert(isel.live_registers.get(mat_ra) == .allocating);
+            return .{ .vi = vi, .ra = mat_ra };
+        }
+
+        /// Defines `def_vi` through memory: ensures its root parent has a
+        /// stack slot (allocating one if needed) and loads its parts from
+        /// that slot into any registers they are expected to occupy.
+        /// Returns null without doing anything when the value is unused.
+        fn defAddr(
+            def_vi: Value.Index,
+            isel: *Select,
+            def_ty: ZigType,
+            wrap: ?std.builtin.Type.Int,
+            expected_live_registers: *const LiveRegisters,
+        ) !?void {
+            if (!def_vi.isUsed(isel)) return null;
+            const offset_from_parent: i65, const parent_vi = def_vi.valueParent(isel);
+            const stack_slot, const allocated = switch (parent_vi.parent(isel)) {
+                .unallocated => .{ parent_vi.allocStackSlot(isel), true },
+                .stack_slot => |stack_slot| .{ stack_slot, false },
+                else => unreachable,
+            };
+            _ = try def_vi.load(isel, def_ty, stack_slot.base, .{
+                .offset = @intCast(stack_slot.offset + offset_from_parent),
+                .split = false,
+                .wrap = wrap,
+                .expected_live_registers = expected_live_registers,
+            });
+            // Only commit the new stack slot after the load succeeded.
+            if (allocated) parent_vi.setParent(isel, .{ .stack_slot = stack_slot });
+        }
+
+        /// Returns the register in which `def_vi` should be defined, or
+        /// null when the value is unused. Walks from `def_vi` up through
+        /// its `.value` parents:
+        /// * An ancestor holding a register has that register handed down
+        ///   to its first part, with the remaining parts merged into it
+        ///   (bfm/fmov inserts), then the walk restarts from `def_vi`.
+        /// * If the value itself holds a register, that register is freed
+        ///   and becomes the definition register.
+        /// * At the root: `.unallocated` means nothing else needs the def;
+        ///   `.stack_slot` additionally emits a store of the def to memory.
+        fn defReg(def_vi: Value.Index, isel: *Select) !?Register.Alias {
+            var vi = def_vi;
+            var offset: i65 = 0;
+            var def_ra: ?Register.Alias = null;
+            while (true) {
+                if (vi.register(isel)) |ra| {
+                    vi.get(isel).location_payload.small.register = .zr;
+                    const live_vi = isel.live_registers.getPtr(ra);
+                    assert(live_vi.* == vi);
+                    if (def_ra == null and vi != def_vi) {
+                        // A split ancestor owns the register: reassign it to
+                        // the first part (which starts at offset 0) ...
+                        var part_it = vi.parts(isel);
+                        assert(part_it.only() == null);
+
+                        const first_part_vi = part_it.next().?;
+                        const first_part_value = first_part_vi.get(isel);
+                        assert(first_part_value.offset_from_parent == 0);
+                        first_part_value.location_payload.small.register = ra;
+                        live_vi.* = first_part_vi;
+
+                        // ... and merge the remaining parts into it. bfm
+                        // inserts `part_size` bytes rotated into place;
+                        // vector parts are moved whole via fmov/orr.
+                        const vi_size = vi.size(isel);
+                        while (part_it.next()) |part_vi| {
+                            const part_offset, const part_size = part_vi.position(isel);
+                            const part_mat = try part_vi.matReg(isel);
+                            try isel.emit(if (part_vi.isVector(isel)) emit: {
+                                assert(part_offset == 0 and part_size == vi_size);
+                                break :emit size: switch (vi_size) {
+                                    else => unreachable,
+                                    2 => if (isel.target.cpu.has(.aarch64, .fullfp16))
+                                        .fmov(ra.h(), .{ .register = part_mat.ra.h() })
+                                    else
+                                        continue :size 4,
+                                    4 => .fmov(ra.s(), .{ .register = part_mat.ra.s() }),
+                                    8 => .fmov(ra.d(), .{ .register = part_mat.ra.d() }),
+                                    16 => .orr(ra.@"16b"(), part_mat.ra.@"16b"(), .{ .register = part_mat.ra.@"16b"() }),
+                                };
+                            } else switch (vi_size) {
+                                else => unreachable,
+                                1...4 => .bfm(ra.w(), part_mat.ra.w(), .{
+                                    .N = .word,
+                                    .immr = @as(u5, @truncate(32 - 8 * part_offset)),
+                                    .imms = @intCast(8 * part_size - 1),
+                                }),
+                                5...8 => .bfm(ra.x(), part_mat.ra.x(), .{
+                                    .N = .doubleword,
+                                    .immr = @as(u6, @truncate(64 - 8 * part_offset)),
+                                    .imms = @intCast(8 * part_size - 1),
+                                }),
+                            });
+                            try part_mat.finish(isel);
+                        }
+                        // Restart: the register reshuffle may have changed
+                        // what the def chain looks like.
+                        vi = def_vi;
+                        offset = 0;
+                        continue;
+                    }
+                    live_vi.* = .free;
+                    def_ra = ra;
+                }
+                offset += vi.get(isel).offset_from_parent;
+                switch (vi.parent(isel)) {
+                    else => unreachable,
+                    .unallocated => return def_ra,
+                    .stack_slot => |stack_slot| {
+                        // Value lives in memory: also store the defined
+                        // register to its slot (scratch register if no
+                        // register consumer exists).
+                        offset += stack_slot.offset;
+                        const def_is_vector = def_vi.isVector(isel);
+                        const ra = def_ra orelse if (def_is_vector) try isel.allocVecReg() else try isel.allocIntReg();
+                        defer if (def_ra == null) isel.freeReg(ra);
+                        try isel.storeReg(ra, def_vi.size(isel), stack_slot.base, offset);
+                        return ra;
+                    },
+                    .value => |parent_vi| vi = parent_vi,
+                }
+            }
+        }
+
+        /// Connects this value to an incoming value in `src_ra` (e.g. an
+        /// ABI-specified register). When the value already occupies a
+        /// different register, a register-to-register move is emitted
+        /// (orr for int<->int, fmov for moves involving vector registers);
+        /// otherwise `src_ra` simply becomes the value's register.
+        pub fn liveIn(
+            vi: Value.Index,
+            isel: *Select,
+            src_ra: Register.Alias,
+            expected_live_registers: *const LiveRegisters,
+        ) !void {
+            const src_live_vi = isel.live_registers.getPtr(src_ra);
+            if (vi.register(isel)) |dst_ra| {
+                const dst_live_vi = isel.live_registers.getPtr(dst_ra);
+                assert(dst_live_vi.* == vi);
+                if (dst_ra == src_ra) {
+                    // Already in the right register; just claim it.
+                    src_live_vi.* = .allocating;
+                    return;
+                }
+                dst_live_vi.* = .allocating;
+                // Evict whatever currently occupies the source register.
+                if (try isel.fill(src_ra)) {
+                    assert(src_live_vi.* == .free);
+                    src_live_vi.* = .allocating;
+                }
+                assert(src_live_vi.* == .allocating);
+                try isel.emit(switch (dst_ra.isVector()) {
+                    false => switch (src_ra.isVector()) {
+                        false => switch (vi.size(isel)) {
+                            else => unreachable,
+                            1...4 => .orr(dst_ra.w(), .wzr, .{ .register = src_ra.w() }),
+                            5...8 => .orr(dst_ra.x(), .xzr, .{ .register = src_ra.x() }),
+                        },
+                        true => switch (vi.size(isel)) {
+                            else => unreachable,
+                            2 => .fmov(dst_ra.w(), .{ .register = src_ra.h() }),
+                            4 => .fmov(dst_ra.w(), .{ .register = src_ra.s() }),
+                            8 => .fmov(dst_ra.x(), .{ .register = src_ra.d() }),
+                        },
+                    },
+                    true => switch (src_ra.isVector()) {
+                        false => switch (vi.size(isel)) {
+                            else => unreachable,
+                            2 => .fmov(dst_ra.h(), .{ .register = src_ra.w() }),
+                            4 => .fmov(dst_ra.s(), .{ .register = src_ra.w() }),
+                            8 => .fmov(dst_ra.d(), .{ .register = src_ra.x() }),
+                        },
+                        true => switch (vi.size(isel)) {
+                            else => unreachable,
+                            2 => .fmov(dst_ra.h(), .{ .register = src_ra.h() }),
+                            4 => .fmov(dst_ra.s(), .{ .register = src_ra.s() }),
+                            8 => .fmov(dst_ra.d(), .{ .register = src_ra.d() }),
+                            16 => .orr(dst_ra.@"16b"(), src_ra.@"16b"(), .{ .register = src_ra.@"16b"() }),
+                        },
+                    },
+                });
+                assert(dst_live_vi.* == .allocating);
+                // Keep the destination register claimed only if the caller
+                // expects something to remain live in it.
+                dst_live_vi.* = switch (expected_live_registers.get(dst_ra)) {
+                    _ => .allocating,
+                    .allocating => .allocating,
+                    .free => .free,
+                };
+            } else if (try isel.fill(src_ra)) {
+                assert(src_live_vi.* == .free);
+                src_live_vi.* = .allocating;
+            }
+            assert(src_live_vi.* == .allocating);
+            vi.get(isel).location_payload.small.register = src_ra;
+        }
+
+        /// Defines this value from the incoming register `src_ra`:
+        /// performs `liveIn`, additionally stores the register to the
+        /// value's stack slot when it has one (skipping fp-based slots),
+        /// and then distributes the register's bytes to any parts that
+        /// expect to hold them (`spillReg`).
+        pub fn defLiveIn(
+            vi: Value.Index,
+            isel: *Select,
+            src_ra: Register.Alias,
+            expected_live_registers: *const LiveRegisters,
+        ) !void {
+            try vi.liveIn(isel, src_ra, expected_live_registers);
+            const offset_from_parent, const parent_vi = vi.valueParent(isel);
+            switch (parent_vi.parent(isel)) {
+                .unallocated => {},
+                .stack_slot => |stack_slot| if (stack_slot.base != Register.Alias.fp) try isel.storeReg(
+                    src_ra,
+                    vi.size(isel),
+                    stack_slot.base,
+                    @as(i65, stack_slot.offset) + offset_from_parent,
+                ),
+                else => unreachable,
+            }
+            try vi.spillReg(isel, src_ra, 0, expected_live_registers);
+        }
+
+        /// Recursively distributes the bytes of `src_ra` into any parts of
+        /// this value that currently hold registers: each such part gets
+        /// its bytes extracted from `src_ra` with sbfm/ubfm (via an
+        /// integer scratch plus fmov for vector parts), after which the
+        /// part's register mapping is released. `start_offset` is the
+        /// byte offset of `vi` within the value held in `src_ra`.
+        fn spillReg(
+            vi: Value.Index,
+            isel: *Select,
+            src_ra: Register.Alias,
+            start_offset: u64,
+            expected_live_registers: *const LiveRegisters,
+        ) !void {
+            assert(isel.live_registers.get(src_ra) == .allocating);
+            var part_it = vi.parts(isel);
+            if (part_it.only()) |part_vi| {
+                const dst_ra = part_vi.register(isel) orelse return;
+                if (dst_ra == src_ra) return;
+                const part_size = part_vi.size(isel);
+                // Bitfield extraction needs an integer register; vector
+                // destinations get an int scratch plus an fmov.
+                const part_ra = if (part_vi.isVector(isel)) try isel.allocIntReg() else dst_ra;
+                defer if (part_ra != dst_ra) isel.freeReg(part_ra);
+                if (part_ra != dst_ra) try isel.emit(switch (part_size) {
+                    else => unreachable,
+                    2 => .fmov(dst_ra.h(), .{ .register = part_ra.w() }),
+                    4 => .fmov(dst_ra.s(), .{ .register = part_ra.w() }),
+                    8 => .fmov(dst_ra.d(), .{ .register = part_ra.x() }),
+                });
+                // Extract bytes [start_offset, end_offset) of src_ra
+                // (immr = lsb, imms = msb of the bit range).
+                try isel.emit(switch (start_offset + part_size) {
+                    else => unreachable,
+                    1...4 => |end_offset| switch (part_vi.signedness(isel)) {
+                        .signed => .sbfm(part_ra.w(), src_ra.w(), .{
+                            .N = .word,
+                            .immr = @intCast(8 * start_offset),
+                            .imms = @intCast(8 * end_offset - 1),
+                        }),
+                        .unsigned => .ubfm(part_ra.w(), src_ra.w(), .{
+                            .N = .word,
+                            .immr = @intCast(8 * start_offset),
+                            .imms = @intCast(8 * end_offset - 1),
+                        }),
+                    },
+                    5...8 => |end_offset| switch (part_vi.signedness(isel)) {
+                        .signed => .sbfm(part_ra.x(), src_ra.x(), .{
+                            .N = .doubleword,
+                            .immr = @intCast(8 * start_offset),
+                            .imms = @intCast(8 * end_offset - 1),
+                        }),
+                        .unsigned => .ubfm(part_ra.x(), src_ra.x(), .{
+                            .N = .doubleword,
+                            .immr = @intCast(8 * start_offset),
+                            .imms = @intCast(8 * end_offset - 1),
+                        }),
+                    },
+                });
+                // Release the part's register mapping.
+                const value_ra = &part_vi.get(isel).location_payload.small.register;
+                assert(value_ra.* == dst_ra);
+                value_ra.* = .zr;
+                const dst_live_vi = isel.live_registers.getPtr(dst_ra);
+                assert(dst_live_vi.* == part_vi);
+                dst_live_vi.* = switch (expected_live_registers.get(dst_ra)) {
+                    _ => .allocating,
+                    .allocating => unreachable,
+                    .free => .free,
+                };
+            } else while (part_it.next()) |part_vi| try part_vi.spillReg(
+                isel,
+                src_ra,
+                start_offset + part_vi.get(isel).offset_from_parent,
+                expected_live_registers,
+            );
+        }
+
+        /// Makes this value live in `ra` at this point: evicts the current
+        /// occupant of `ra` (the fill must succeed), claims the register,
+        /// and materializes the value into it.
+        fn liveOut(vi: Value.Index, isel: *Select, ra: Register.Alias) !void {
+            assert(try isel.fill(ra));
+            const live_vi = isel.live_registers.getPtr(ra);
+            assert(live_vi.* == .free);
+            live_vi.* = .allocating;
+            try Value.Materialize.finish(.{ .vi = vi, .ra = ra }, isel);
+        }
+
+        /// Bump-allocates an sp-relative stack slot for this value,
+        /// aligning the current `stack_size` to the value's alignment and
+        /// growing it by the value's size.
+        fn allocStackSlot(vi: Value.Index, isel: *Select) Value.Indirect {
+            const offset = vi.alignment(isel).forward(isel.stack_size);
+            isel.stack_size = @intCast(offset + vi.size(isel));
+            tracking_log.debug("${d} -> [sp, #0x{x}]", .{ @intFromEnum(vi), @abs(offset) });
+            return .{
+                .base = .sp,
+                .offset = @intCast(offset),
+            };
+        }
+
+        /// Emits code computing the address of this value plus
+        /// `initial_offset` into `ptr_ra`. Walks `.value` parents while
+        /// accumulating offsets until reaching the value's storage:
+        /// * `.unallocated` — allocate a stack slot first, then fall
+        ///   through to the stack-slot case;
+        /// * `.stack_slot` — base register plus a 24-bit offset split into
+        ///   lo12/hi12 add/sub immediates;
+        /// * `.address` — the parent pointer value itself;
+        /// * `.constant` — an unnamed-constant (uav) relocation pair.
+        fn address(initial_vi: Value.Index, isel: *Select, initial_offset: u64, ptr_ra: Register.Alias) !void {
+            var vi = initial_vi;
+            var offset: i65 = vi.get(isel).offset_from_parent + initial_offset;
+            parent: switch (vi.parent(isel)) {
+                .unallocated => {
+                    const stack_slot = vi.allocStackSlot(isel);
+                    vi.setParent(isel, .{ .stack_slot = stack_slot });
+                    continue :parent .{ .stack_slot = stack_slot };
+                },
+                .stack_slot => |stack_slot| {
+                    offset += stack_slot.offset;
+                    const lo12: u12 = @truncate(@abs(offset) >> 0);
+                    const hi12: u12 = @intCast(@abs(offset) >> 12);
+                    // Note reverse-order emission: the hi12 instruction is
+                    // emitted first but executes second, consuming ptr_ra
+                    // from the lo12 instruction when both are needed.
+                    if (hi12 > 0) try isel.emit(if (offset >= 0) .add(
+                        ptr_ra.x(),
+                        if (lo12 > 0) ptr_ra.x() else stack_slot.base.x(),
+                        .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+                    ) else .sub(
+                        ptr_ra.x(),
+                        if (lo12 > 0) ptr_ra.x() else stack_slot.base.x(),
+                        .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+                    ));
+                    // Emit the lo12 instruction even when lo12 == 0 if it
+                    // is the only one, so ptr_ra is always written.
+                    if (lo12 > 0 or hi12 == 0) try isel.emit(if (offset >= 0) .add(
+                        ptr_ra.x(),
+                        stack_slot.base.x(),
+                        .{ .immediate = lo12 },
+                    ) else .sub(
+                        ptr_ra.x(),
+                        stack_slot.base.x(),
+                        .{ .immediate = lo12 },
+                    ));
+                },
+                .address => |address_vi| try address_vi.liveOut(isel, ptr_ra),
+                .value => |parent_vi| {
+                    vi = parent_vi;
+                    offset += vi.get(isel).offset_from_parent;
+                    continue :parent vi.parent(isel);
+                },
+                .constant => |constant| {
+                    const pt = isel.pt;
+                    const zcu = pt.zcu;
+                    // `switch (true)` keeps the single-instruction `adr`
+                    // strategy around but disabled; the active strategy is
+                    // the relocated adrp+add pair (add emitted first, so it
+                    // executes after adrp under reverse-order emission).
+                    switch (true) {
+                        false => {
+                            try isel.uav_relocs.append(zcu.gpa, .{
+                                .uav = .{
+                                    .val = constant.toIntern(),
+                                    .orig_ty = (try pt.singleConstPtrType(constant.typeOf(zcu))).toIntern(),
+                                },
+                                .reloc = .{
+                                    .label = @intCast(isel.instructions.items.len),
+                                    .addend = @intCast(offset),
+                                },
+                            });
+                            try isel.emit(.adr(ptr_ra.x(), 0));
+                        },
+                        true => {
+                            try isel.uav_relocs.append(zcu.gpa, .{
+                                .uav = .{
+                                    .val = constant.toIntern(),
+                                    .orig_ty = (try pt.singleConstPtrType(constant.typeOf(zcu))).toIntern(),
+                                },
+                                .reloc = .{
+                                    .label = @intCast(isel.instructions.items.len),
+                                    .addend = @intCast(offset),
+                                },
+                            });
+                            try isel.emit(.add(ptr_ra.x(), ptr_ra.x(), .{ .immediate = 0 }));
+                            try isel.uav_relocs.append(zcu.gpa, .{
+                                .uav = .{
+                                    .val = constant.toIntern(),
+                                    .orig_ty = (try pt.singleConstPtrType(constant.typeOf(zcu))).toIntern(),
+                                },
+                                .reloc = .{
+                                    .label = @intCast(isel.instructions.items.len),
+                                    .addend = @intCast(offset),
+                                },
+                            });
+                            try isel.emit(.adrp(ptr_ra.x(), 0));
+                        },
+                    }
+                },
+            }
+        }
+ };
+
+        /// Iterates a contiguous run of part values; parts are stored at
+        /// consecutive `Value.Index`es starting at `vi`.
+        pub const PartIterator = struct {
+            vi: Value.Index,
+            remaining: Value.PartsLen,
+
+            /// An iterator that yields exactly `vi` and nothing else.
+            fn initOne(vi: Value.Index) PartIterator {
+                return .{ .vi = vi, .remaining = 1 };
+            }
+
+            /// Returns the next part, or `null` when exhausted.
+            pub fn next(it: *PartIterator) ?Value.Index {
+                if (it.remaining == 0) return null;
+                const result = it.vi;
+                it.vi = @enumFromInt(@intFromEnum(result) + 1);
+                it.remaining -= 1;
+                return result;
+            }
+
+            /// Returns what `next` would return, without advancing.
+            pub fn peek(it: PartIterator) ?Value.Index {
+                if (it.remaining == 0) return null;
+                return it.vi;
+            }
+
+            /// Returns the sole remaining part when exactly one is left.
+            pub fn only(it: PartIterator) ?Value.Index {
+                if (it.remaining != 1) return null;
+                return it.vi;
+            }
+        };
+
+ /// Iterates over the parts of the byte range
+ /// `field_offset .. field_offset + field_size` within the value `vi` of
+ /// type `ty`, lazily splitting values into sub-parts (via `setParts` /
+ /// `addPart`) according to the type's ABI layout as needed.
+ const FieldPartIterator = struct {
+ // Root value being decomposed.
+ vi: Value.Index,
+ // Zig type of `vi`; asserted to match `vi.size(isel)` in `next`.
+ ty: ZigType,
+ // Byte offset of the field within `vi`.
+ field_offset: u64,
+ // Byte size of the field.
+ field_size: u64,
+ // Bytes of the field already yielded by previous calls to `next`.
+ next_offset: u64,
+
+ /// Returns the next part covering the field: the part's value index plus
+ /// a byte offset (relative to the iteration position within the field),
+ /// or null once `field_size` bytes have been yielded.
+ fn next(it: *FieldPartIterator, isel: *Select) !?struct { offset: u64, vi: Value.Index } {
+ const next_offset = it.next_offset;
+ const next_part_size = it.field_size - next_offset;
+ if (next_part_size == 0) return null;
+ var next_part_offset = it.field_offset + next_offset;
+
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ var vi = it.vi;
+ var ty = it.ty;
+ var ty_size = vi.size(isel);
+ assert(ty_size == ty.abiSize(zcu));
+ var offset: u64 = 0;
+ var size = ty_size;
+ assert(next_part_offset + next_part_size <= size);
+ // Descend through already-existing parts, and split unsplit leaves by
+ // type layout, until the current value contains the requested range.
+ while (next_part_offset > 0 or next_part_size < size) {
+ const part_vi = vi.partAtOffset(isel, next_part_offset);
+ if (part_vi != vi) {
+ // An existing sub-part contains the target offset; descend into it
+ // and rebase `next_part_offset` relative to that part.
+ vi = part_vi;
+ const part_offset, size = part_vi.position(isel);
+ assert(part_offset <= next_part_offset and part_offset + size > next_part_offset);
+ offset += part_offset;
+ next_part_offset -= part_offset;
+ continue;
+ }
+ // `vi` is an unsplit leaf; reserve capacity for the parts it may
+ // gain below so `addPart` cannot fail mid-split.
+ try isel.values.ensureUnusedCapacity(zcu.gpa, Value.max_parts);
+ // Split the leaf according to its type's layout. Several arms
+ // forward to another type key via `continue :type_key`.
+ type_key: switch (ip.indexToKey(ty.toIntern())) {
+ else => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ .int_type => |int_type| switch (int_type.bits) {
+ 0 => unreachable,
+ 1...64 => unreachable,
+ // Big integers split into 8-byte limbs.
+ 65...256 => |bits| if (offset == 0 and size == ty_size) {
+ const parts_len = std.math.divCeil(u16, bits, 64) catch unreachable;
+ vi.setParts(isel, @intCast(parts_len));
+ for (0..parts_len) |part_index| _ = vi.addPart(isel, 8 * part_index, 8);
+ },
+ else => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ },
+ .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+ .one, .many, .c => unreachable,
+ // A slice splits into its pointer and length words.
+ .slice => if (offset == 0 and size == ty_size) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, 8);
+ _ = vi.addPart(isel, 8, 8);
+ } else unreachable,
+ },
+ // Pointer-like optionals are represented by the payload itself;
+ // otherwise split into payload plus a one-byte has-value flag.
+ .opt_type => |child_type| if (ty.optionalReprIsPayload(zcu))
+ continue :type_key ip.indexToKey(child_type)
+ else switch (ZigType.fromInterned(child_type).abiSize(zcu)) {
+ 0...8, 16 => |child_size| if (offset == 0 and size == ty_size) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, child_size);
+ _ = vi.addPart(isel, child_size, 1);
+ } else unreachable,
+ // Payloads of 9..15 bytes split in two steps: first at the
+ // 8-byte boundary, then the tail into payload rest + flag.
+ 9...15 => |child_size| if (offset == 0 and size == ty_size) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, 8);
+ _ = vi.addPart(isel, 8, ty_size - 8);
+ } else if (offset == 8 and size == ty_size - 8) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, child_size - 8);
+ _ = vi.addPart(isel, child_size - 8, 1);
+ } else unreachable,
+ else => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ },
+ .array_type => |array_type| {
+ // Minimum log2 stride used when coalescing elements so the
+ // resulting part count stays within `Value.max_parts`.
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ const array_len = array_type.lenIncludingSentinel();
+ if (array_len > Value.max_parts and
+ (std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const Part = struct { offset: u64, size: u64 };
+ var parts: [Value.max_parts]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ const elem_ty: ZigType = .fromInterned(array_type.child);
+ const elem_size = elem_ty.abiSize(zcu);
+ // Track signedness only for small (<= 16-bit) integer elements.
+ const elem_signedness = if (ty.isAbiInt(zcu)) elem_signedness: {
+ const elem_int_info = elem_ty.intInfo(zcu);
+ break :elem_signedness if (elem_int_info.bits <= 16) elem_int_info.signedness else null;
+ } else null;
+ const elem_is_vector = elem_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, elem_ty.toIntern()) != null;
+ var elem_end: u64 = 0;
+ for (0..@intCast(array_len)) |_| {
+ const elem_begin = elem_end;
+ if (elem_begin >= offset + size) break;
+ elem_end = elem_begin + elem_size;
+ if (elem_end <= offset) continue;
+ // If a single element fully contains the requested range,
+ // recurse into the element type instead of splitting here.
+ if (offset >= elem_begin and offset + size <= elem_begin + elem_size) {
+ ty = elem_ty;
+ ty_size = elem_size;
+ offset -= elem_begin;
+ continue :type_key ip.indexToKey(elem_ty.toIntern());
+ }
+ // Merge this element into the previous part when the combined
+ // size still fits the permitted stride/alignment granularity.
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = elem_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ continue;
+ }
+ parts[parts_len] = .{ .offset = elem_begin, .size = elem_size };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (elem_signedness) |signedness| subpart_vi.setSignedness(isel, signedness);
+ if (elem_is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ .anyframe_type => unreachable,
+ // Error unions split into error-set and payload fields, visited in
+ // ascending offset order.
+ .error_union_type => |error_union_type| {
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ if ((std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const payload_ty: ZigType = .fromInterned(error_union_type.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const payload_offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const Part = struct { offset: u64, size: u64, signedness: ?std.builtin.Signedness, is_vector: bool };
+ var parts: [2]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var field_end: u64 = 0;
+ for (0..2) |field_index| {
+ // Order the two fields by their layout offsets.
+ const field_ty: ZigType, const field_begin = switch (@as(enum { error_set, payload }, switch (field_index) {
+ 0 => if (error_set_offset < payload_offset) .error_set else .payload,
+ 1 => if (error_set_offset < payload_offset) .payload else .error_set,
+ else => unreachable,
+ })) {
+ .error_set => .{ .fromInterned(error_union_type.error_set_type), error_set_offset },
+ .payload => .{ payload_ty, payload_offset },
+ };
+ if (field_begin >= offset + size) break;
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ field_end = field_begin + field_size;
+ if (field_end <= offset) continue;
+ if (offset >= field_begin and offset + size <= field_begin + field_size) {
+ ty = field_ty;
+ ty_size = field_size;
+ offset -= field_begin;
+ continue :type_key ip.indexToKey(field_ty.toIntern());
+ }
+ const field_signedness = if (field_ty.isAbiInt(zcu)) field_signedness: {
+ const field_int_info = field_ty.intInfo(zcu);
+ break :field_signedness if (field_int_info.bits <= 16) field_int_info.signedness else null;
+ } else null;
+ const field_is_vector = field_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, field_ty.toIntern()) != null;
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = field_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ // Merged parts lose single-field signedness information.
+ prev_part.signedness = null;
+ prev_part.is_vector &= field_is_vector;
+ continue;
+ }
+ parts[parts_len] = .{
+ .offset = field_begin,
+ .size = field_size,
+ .signedness = field_signedness,
+ .is_vector = field_is_vector,
+ };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (part.signedness) |signedness| subpart_vi.setSignedness(isel, signedness);
+ if (part.is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ // Simple types are forwarded to the equivalent integer handling
+ // where possible; IEEE floats are rejected.
+ .simple_type => |simple_type| switch (simple_type) {
+ .f16, .f32, .f64, .f128, .c_longdouble => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ .f80 => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 80 } },
+ .usize,
+ .isize,
+ .c_char,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ => continue :type_key .{ .int_type = ty.intInfo(zcu) },
+ .anyopaque,
+ .void,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .noreturn,
+ .null,
+ .undefined,
+ .enum_literal,
+ .adhoc_inferred_error_set,
+ .generic_poison,
+ => unreachable,
+ .bool => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 1 } },
+ .anyerror => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = zcu.errorSetBits(),
+ } },
+ },
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(ty.toIntern());
+ switch (loaded_struct.layout) {
+ .auto, .@"extern" => {},
+ // Packed structs are handled as their backing integer.
+ .@"packed" => continue :type_key .{
+ .int_type = ip.indexToKey(loaded_struct.backingIntTypeUnordered(ip)).int_type,
+ },
+ }
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ if (loaded_struct.field_types.len > Value.max_parts and
+ (std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const Part = struct { offset: u64, size: u64, signedness: ?std.builtin.Signedness, is_vector: bool };
+ var parts: [Value.max_parts]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var field_end: u64 = 0;
+ // Walk fields in runtime order, accumulating aligned offsets.
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ const field_begin = switch (loaded_struct.fieldAlign(ip, field_index)) {
+ .none => field_ty.abiAlignment(zcu),
+ else => |field_align| field_align,
+ }.forward(field_end);
+ if (field_begin >= offset + size) break;
+ const field_size = field_ty.abiSize(zcu);
+ field_end = field_begin + field_size;
+ if (field_end <= offset) continue;
+ if (offset >= field_begin and offset + size <= field_begin + field_size) {
+ ty = field_ty;
+ ty_size = field_size;
+ offset -= field_begin;
+ continue :type_key ip.indexToKey(field_ty.toIntern());
+ }
+ const field_signedness = if (field_ty.isAbiInt(zcu)) field_signedness: {
+ const field_int_info = field_ty.intInfo(zcu);
+ break :field_signedness if (field_int_info.bits <= 16) field_int_info.signedness else null;
+ } else null;
+ const field_is_vector = field_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, field_ty.toIntern()) != null;
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = field_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ prev_part.signedness = null;
+ prev_part.is_vector &= field_is_vector;
+ continue;
+ }
+ parts[parts_len] = .{
+ .offset = field_begin,
+ .size = field_size,
+ .signedness = field_signedness,
+ .is_vector = field_is_vector,
+ };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (part.signedness) |signedness| subpart_vi.setSignedness(isel, signedness);
+ if (part.is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ .tuple_type => |tuple_type| {
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ if (tuple_type.types.len > Value.max_parts and
+ (std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const Part = struct { offset: u64, size: u64, is_vector: bool };
+ var parts: [Value.max_parts]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var field_end: u64 = 0;
+ for (tuple_type.types.get(ip), tuple_type.values.get(ip)) |field_type, field_value| {
+ // Comptime-known fields occupy no runtime storage.
+ if (field_value != .none) continue;
+ const field_ty: ZigType = .fromInterned(field_type);
+ const field_begin = field_ty.abiAlignment(zcu).forward(field_end);
+ if (field_begin >= offset + size) break;
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ field_end = field_begin + field_size;
+ if (field_end <= offset) continue;
+ if (offset >= field_begin and offset + size <= field_begin + field_size) {
+ ty = field_ty;
+ ty_size = field_size;
+ offset -= field_begin;
+ continue :type_key ip.indexToKey(field_ty.toIntern());
+ }
+ const field_is_vector = field_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, field_ty.toIntern()) != null;
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = field_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ prev_part.is_vector &= field_is_vector;
+ continue;
+ }
+ parts[parts_len] = .{ .offset = field_begin, .size = field_size, .is_vector = field_is_vector };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (part.is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ // Unions split into tag and payload regions, visited in ascending
+ // offset order.
+ .union_type => {
+ const loaded_union = ip.loadUnionType(ty.toIntern());
+ switch (loaded_union.flagsUnordered(ip).layout) {
+ .auto, .@"extern" => {},
+ .@"packed" => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = @intCast(ty.bitSize(zcu)),
+ } },
+ }
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ if ((std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const union_layout = ZigType.getUnionLayout(loaded_union, zcu);
+ const alignment = vi.alignment(isel);
+ const tag_offset = union_layout.tagOffset();
+ const payload_offset = union_layout.payloadOffset();
+ const Part = struct { offset: u64, size: u64, signedness: ?std.builtin.Signedness };
+ var parts: [2]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var field_end: u64 = 0;
+ for (0..2) |field_index| {
+ const field: enum { tag, payload } = switch (field_index) {
+ 0 => if (tag_offset < payload_offset) .tag else .payload,
+ 1 => if (tag_offset < payload_offset) .payload else .tag,
+ else => unreachable,
+ };
+ const field_size, const field_begin = switch (field) {
+ .tag => .{ union_layout.tag_size, tag_offset },
+ .payload => .{ union_layout.payload_size, payload_offset },
+ };
+ if (field_begin >= offset + size) break;
+ if (field_size == 0) continue;
+ field_end = field_begin + field_size;
+ if (field_end <= offset) continue;
+ const field_signedness = field_signedness: switch (field) {
+ .tag => {
+ // A range entirely within the tag recurses into the
+ // enum tag type.
+ if (offset >= field_begin and offset + size <= field_begin + field_size) {
+ ty = .fromInterned(loaded_union.enum_tag_ty);
+ ty_size = field_size;
+ offset -= field_begin;
+ continue :type_key ip.indexToKey(loaded_union.enum_tag_ty);
+ }
+ break :field_signedness ip.indexToKey(loaded_union.loadTagType(ip).tag_ty).int_type.signedness;
+ },
+ .payload => null,
+ };
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = field_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ prev_part.signedness = null;
+ continue;
+ }
+ parts[parts_len] = .{
+ .offset = field_begin,
+ .size = field_size,
+ .signedness = field_signedness,
+ };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (part.signedness) |signedness| subpart_vi.setSignedness(isel, signedness);
+ }
+ },
+ .opaque_type, .func_type => continue :type_key .{ .simple_type = .anyopaque },
+ .enum_type => continue :type_key ip.indexToKey(ip.loadEnumType(ty.toIntern()).tag_ty),
+ .error_set_type,
+ .inferred_error_set_type,
+ => continue :type_key .{ .simple_type = .anyerror },
+ .undef,
+ .simple_value,
+ .variable,
+ .@"extern",
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .empty_enum_value,
+ .float,
+ .ptr,
+ .slice,
+ .opt,
+ .aggregate,
+ .un,
+ .memoized_call,
+ => unreachable, // values, not types
+ }
+ }
+ // The next call resumes after the `size` bytes covered by this part.
+ it.next_offset = next_offset + size;
+ return .{ .offset = next_part_offset - next_offset, .vi = vi };
+ }
+
+ /// Returns the single part covering the entire field, or null when the
+ /// field spans more than one part. Asserts the first part starts at the
+ /// beginning of the field.
+ fn only(it: *FieldPartIterator, isel: *Select) !?Value.Index {
+ const part = try it.next(isel);
+ assert(part.?.offset == 0);
+ return if (try it.next(isel)) |_| null else part.?.vi;
+ }
+ };
+
+ const Materialize = struct {
+ vi: Value.Index,
+ ra: Register.Alias,
+
+ fn finish(mat: Value.Materialize, isel: *Select) error{ OutOfMemory, CodegenFail }!void {
+ const live_vi = isel.live_registers.getPtr(mat.ra);
+ assert(live_vi.* == .allocating);
+ var vi = mat.vi;
+ var offset: u64 = 0;
+ const size = mat.vi.size(isel);
+ free: while (true) {
+ if (vi.register(isel)) |ra| {
+ if (ra != mat.ra) break :free try isel.emit(if (vi == mat.vi) if (mat.ra.isVector()) switch (size) {
+ else => unreachable,
+ 2 => .fmov(mat.ra.h(), .{ .register = ra.h() }),
+ 4 => .fmov(mat.ra.s(), .{ .register = ra.s() }),
+ 8 => .fmov(mat.ra.d(), .{ .register = ra.d() }),
+ 16 => .orr(mat.ra.@"16b"(), ra.@"16b"(), .{ .register = ra.@"16b"() }),
+ } else switch (size) {
+ else => unreachable,
+ 1...4 => .orr(mat.ra.w(), .wzr, .{ .register = ra.w() }),
+ 5...8 => .orr(mat.ra.x(), .xzr, .{ .register = ra.x() }),
+ } else switch (offset + size) {
+ else => unreachable,
+ 1...4 => |end_offset| switch (mat.vi.signedness(isel)) {
+ .signed => .sbfm(mat.ra.w(), ra.w(), .{
+ .N = .word,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ .unsigned => .ubfm(mat.ra.w(), ra.w(), .{
+ .N = .word,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ },
+ 5...8 => |end_offset| switch (mat.vi.signedness(isel)) {
+ .signed => .sbfm(mat.ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ .unsigned => .ubfm(mat.ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ },
+ });
+ mat.vi.get(isel).location_payload.small.register = mat.ra;
+ live_vi.* = mat.vi;
+ return;
+ }
+ offset += vi.get(isel).offset_from_parent;
+ switch (vi.parent(isel)) {
+ .unallocated => {
+ mat.vi.get(isel).location_payload.small.register = mat.ra;
+ live_vi.* = mat.vi;
+ return;
+ },
+ .stack_slot => |stack_slot| break :free try isel.loadReg(
+ mat.ra,
+ size,
+ mat.vi.signedness(isel),
+ stack_slot.base,
+ @as(i65, stack_slot.offset) + offset,
+ ),
+ .address => |base_vi| {
+ const base_mat = try base_vi.matReg(isel);
+ try isel.loadReg(mat.ra, size, mat.vi.signedness(isel), base_mat.ra, offset);
+ break :free try base_mat.finish(isel);
+ },
+ .value => |parent_vi| vi = parent_vi,
+ .constant => |initial_constant| {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ var constant = initial_constant.toIntern();
+ var constant_key = ip.indexToKey(constant);
+ while (true) {
+ constant_key: switch (constant_key) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .tuple_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+
+ .enum_literal,
+ .empty_enum_value,
+ .memoized_call,
+ => unreachable, // not a runtime value
+ .undef => break :free try isel.emit(if (mat.ra.isVector()) .movi(switch (size) {
+ else => unreachable,
+ 1...8 => mat.ra.@"8b"(),
+ 9...16 => mat.ra.@"16b"(),
+ }, 0xaa, .{ .lsl = 0 }) else switch (size) {
+ else => unreachable,
+ 1...4 => .orr(mat.ra.w(), .wzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ 5...8 => .orr(mat.ra.x(), .xzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ }),
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined, .void, .null, .empty_tuple, .@"unreachable" => unreachable,
+ .true => continue :constant_key .{ .int = .{
+ .ty = .bool_type,
+ .storage = .{ .u64 = 1 },
+ } },
+ .false => continue :constant_key .{ .int = .{
+ .ty = .bool_type,
+ .storage = .{ .u64 = 0 },
+ } },
+ },
+ .int => |int| break :free storage: switch (int.storage) {
+ .u64 => |imm| try isel.movImmediate(switch (size) {
+ else => unreachable,
+ 1...4 => mat.ra.w(),
+ 5...8 => mat.ra.x(),
+ }, @bitCast(std.math.shr(u64, imm, 8 * offset))),
+ .i64 => |imm| switch (size) {
+ else => unreachable,
+ 1...4 => try isel.movImmediate(mat.ra.w(), @as(u32, @bitCast(@as(i32, @truncate(std.math.shr(i64, imm, 8 * offset)))))),
+ 5...8 => try isel.movImmediate(mat.ra.x(), @bitCast(std.math.shr(i64, imm, 8 * offset))),
+ },
+ .big_int => |big_int| {
+ assert(size == 8);
+ var imm: u64 = 0;
+ const limb_bits = @bitSizeOf(std.math.big.Limb);
+ const limbs = @divExact(64, limb_bits);
+ var limb_index: usize = @intCast(@divExact(offset, @divExact(limb_bits, 8)) + limbs);
+ for (0..limbs) |_| {
+ limb_index -= 1;
+ if (limb_index >= big_int.limbs.len) continue;
+ if (limb_bits < 64) imm <<= limb_bits;
+ imm |= big_int.limbs[limb_index];
+ }
+ if (!big_int.positive) {
+ limb_index = @min(limb_index, big_int.limbs.len);
+ imm = while (limb_index > 0) {
+ limb_index -= 1;
+ if (big_int.limbs[limb_index] != 0) break ~imm;
+ } else -%imm;
+ }
+ try isel.movImmediate(mat.ra.x(), imm);
+ },
+ .lazy_align => |ty| continue :storage .{
+ .u64 = ZigType.fromInterned(ty).abiAlignment(zcu).toByteUnits().?,
+ },
+ .lazy_size => |ty| continue :storage .{
+ .u64 = ZigType.fromInterned(ty).abiSize(zcu),
+ },
+ },
+ .err => |err| continue :constant_key .{ .int = .{
+ .ty = err.ty,
+ .storage = .{ .u64 = ip.getErrorValueIfExists(err.name).? },
+ } },
+ .error_union => |error_union| {
+ const error_union_type = ip.indexToKey(error_union.ty).error_union_type;
+ const error_set_ty: ZigType = .fromInterned(error_union_type.error_set_type);
+ const payload_ty: ZigType = .fromInterned(error_union_type.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const error_set_size = error_set_ty.abiSize(zcu);
+ if (offset >= error_set_offset and offset + size <= error_set_offset + error_set_size) {
+ offset -= error_set_offset;
+ continue :constant_key switch (error_union.val) {
+ .err_name => |err_name| .{ .err = .{
+ .ty = error_union_type.error_set_type,
+ .name = err_name,
+ } },
+ .payload => .{ .int = .{
+ .ty = error_union_type.error_set_type,
+ .storage = .{ .u64 = 0 },
+ } },
+ };
+ }
+ const payload_offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+ if (offset >= payload_offset and offset + size <= payload_offset + payload_size) {
+ offset -= payload_offset;
+ switch (error_union.val) {
+ .err_name => continue :constant_key .{ .undef = error_union_type.payload_type },
+ .payload => |payload| {
+ constant = payload;
+ constant_key = ip.indexToKey(payload);
+ continue :constant_key constant_key;
+ },
+ }
+ }
+ },
+ .enum_tag => |enum_tag| continue :constant_key .{ .int = ip.indexToKey(enum_tag.int).int },
+ .float => |float| storage: switch (float.storage) {
+ .f16 => |imm| {
+ if (!mat.ra.isVector()) continue :constant_key .{ .int = .{
+ .ty = .u16_type,
+ .storage = .{ .u64 = @as(u16, @bitCast(imm)) },
+ } };
+ const feat_fp16 = isel.target.cpu.has(.aarch64, .fullfp16);
+ if (feat_fp16) {
+ const Repr = std.math.FloatRepr(f16);
+ const repr: Repr = @bitCast(imm);
+ if (repr.mantissa & std.math.maxInt(Repr.Mantissa) >> 5 == 0 and switch (repr.exponent) {
+ .denormal, .infinite => false,
+ else => std.math.cast(i3, repr.exponent.unbias() - 1) != null,
+ }) break :free try isel.emit(.fmov(mat.ra.h(), .{ .immediate = imm }));
+ }
+ const bits: u16 = @bitCast(imm);
+ if (bits == 0) break :free try isel.emit(.movi(mat.ra.d(), 0b00000000, .replicate));
+ if (bits & std.math.maxInt(u8) == 0) break :free try isel.emit(.movi(
+ mat.ra.@"4h"(),
+ @intCast(@shrExact(bits, 8)),
+ .{ .lsl = 8 },
+ ));
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ try isel.emit(.fmov(if (feat_fp16) mat.ra.h() else mat.ra.s(), .{ .register = temp_ra.w() }));
+ break :free try isel.movImmediate(temp_ra.w(), bits);
+ },
+ .f32 => |imm| {
+ if (!mat.ra.isVector()) continue :constant_key .{ .int = .{
+ .ty = .u32_type,
+ .storage = .{ .u64 = @as(u32, @bitCast(imm)) },
+ } };
+ const Repr = std.math.FloatRepr(f32);
+ const repr: Repr = @bitCast(imm);
+ if (repr.mantissa & std.math.maxInt(Repr.Mantissa) >> 5 == 0 and switch (repr.exponent) {
+ .denormal, .infinite => false,
+ else => std.math.cast(i3, repr.exponent.unbias() - 1) != null,
+ }) break :free try isel.emit(.fmov(mat.ra.s(), .{ .immediate = @floatCast(imm) }));
+ const bits: u32 = @bitCast(imm);
+ if (bits == 0) break :free try isel.emit(.movi(mat.ra.d(), 0b00000000, .replicate));
+ if (bits & std.math.maxInt(u24) == 0) break :free try isel.emit(.movi(
+ mat.ra.@"2s"(),
+ @intCast(@shrExact(bits, 24)),
+ .{ .lsl = 24 },
+ ));
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ try isel.emit(.fmov(mat.ra.s(), .{ .register = temp_ra.w() }));
+ break :free try isel.movImmediate(temp_ra.w(), bits);
+ },
+ .f64 => |imm| {
+ if (!mat.ra.isVector()) continue :constant_key .{ .int = .{
+ .ty = .u64_type,
+ .storage = .{ .u64 = @as(u64, @bitCast(imm)) },
+ } };
+ const Repr = std.math.FloatRepr(f64);
+ const repr: Repr = @bitCast(imm);
+ if (repr.mantissa & std.math.maxInt(Repr.Mantissa) >> 5 == 0 and switch (repr.exponent) {
+ .denormal, .infinite => false,
+ else => std.math.cast(i3, repr.exponent.unbias() - 1) != null,
+ }) break :free try isel.emit(.fmov(mat.ra.d(), .{ .immediate = @floatCast(imm) }));
+ const bits: u64 = @bitCast(imm);
+ if (bits == 0) break :free try isel.emit(.movi(mat.ra.d(), 0b00000000, .replicate));
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ try isel.emit(.fmov(mat.ra.d(), .{ .register = temp_ra.x() }));
+ break :free try isel.movImmediate(temp_ra.x(), bits);
+ },
+ .f80 => |imm| break :free try isel.movImmediate(
+ mat.ra.x(),
+ @truncate(std.math.shr(u80, @bitCast(imm), 8 * offset)),
+ ),
+ .f128 => |imm| switch (ZigType.fromInterned(float.ty).floatBits(isel.target)) {
+ else => unreachable,
+ 16 => continue :storage .{ .f16 = @floatCast(imm) },
+ 32 => continue :storage .{ .f32 = @floatCast(imm) },
+ 64 => continue :storage .{ .f64 = @floatCast(imm) },
+ 128 => {
+ const bits: u128 = @bitCast(imm);
+ const hi64: u64 = @intCast(bits >> 64);
+ const lo64: u64 = @truncate(bits >> 0);
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ switch (hi64) {
+ 0 => {},
+ else => {
+ try isel.emit(.fmov(mat.ra.@"d[]"(1), .{ .register = temp_ra.x() }));
+ try isel.movImmediate(temp_ra.x(), hi64);
+ },
+ }
+ break :free switch (lo64) {
+ 0 => try isel.emit(.movi(switch (hi64) {
+ else => mat.ra.d(),
+ 0 => mat.ra.@"2d"(),
+ }, 0b00000000, .replicate)),
+ else => {
+ try isel.emit(.fmov(mat.ra.d(), .{ .register = temp_ra.x() }));
+ try isel.movImmediate(temp_ra.x(), lo64);
+ },
+ };
+ },
+ },
+ },
+ .ptr => |ptr| {
+ assert(offset == 0 and size == 8);
+ break :free switch (ptr.base_addr) {
+ .nav => |nav| if (ZigType.fromInterned(ip.getNav(nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) {
+ false => {
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adr(mat.ra.x(), 0));
+ },
+ true => {
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.add(mat.ra.x(), mat.ra.x(), .{ .immediate = 0 }));
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adrp(mat.ra.x(), 0));
+ },
+ } else continue :constant_key .{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = isel.pt.navAlignment(nav).forward(0xaaaaaaaaaaaaaaaa) },
+ } },
+ .uav => |uav| if (ZigType.fromInterned(ip.typeOf(uav.val)).isFnOrHasRuntimeBits(zcu)) switch (true) {
+ false => {
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = uav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adr(mat.ra.x(), 0));
+ },
+ true => {
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = uav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.add(mat.ra.x(), mat.ra.x(), .{ .immediate = 0 }));
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = uav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adrp(mat.ra.x(), 0));
+ },
+ } else continue :constant_key .{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = ZigType.fromInterned(uav.orig_ty).ptrAlignment(zcu).forward(0xaaaaaaaaaaaaaaaa) },
+ } },
+ .int => continue :constant_key .{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = ptr.byte_offset },
+ } },
+ .eu_payload => |base| {
+ var base_ptr = ip.indexToKey(base).ptr;
+ const eu_ty = ip.indexToKey(base_ptr.ty).ptr_type.child;
+ const payload_ty = ip.indexToKey(eu_ty).error_union_type.payload_type;
+ base_ptr.byte_offset += codegen.errUnionPayloadOffset(.fromInterned(payload_ty), zcu) + ptr.byte_offset;
+ continue :constant_key .{ .ptr = base_ptr };
+ },
+ .opt_payload => |base| {
+ var base_ptr = ip.indexToKey(base).ptr;
+ base_ptr.byte_offset += ptr.byte_offset;
+ continue :constant_key .{ .ptr = base_ptr };
+ },
+ .field => |field| {
+ var base_ptr = ip.indexToKey(field.base).ptr;
+ const agg_ty: ZigType = .fromInterned(ip.indexToKey(base_ptr.ty).ptr_type.child);
+ base_ptr.byte_offset += agg_ty.structFieldOffset(@intCast(field.index), zcu) + ptr.byte_offset;
+ continue :constant_key .{ .ptr = base_ptr };
+ },
+ .comptime_alloc, .comptime_field, .arr_elem => unreachable,
+ };
+ },
+ .slice => |slice| switch (offset) {
+ 0 => continue :constant_key switch (ip.indexToKey(slice.ptr)) {
+ else => unreachable,
+ .undef => |undef| .{ .undef = undef },
+ .ptr => |ptr| .{ .ptr = ptr },
+ },
+ else => {
+ assert(offset == @divExact(isel.target.ptrBitWidth(), 8));
+ offset = 0;
+ continue :constant_key .{ .int = ip.indexToKey(slice.len).int };
+ },
+ },
+ .opt => |opt| {
+ const child_ty = ip.indexToKey(opt.ty).opt_type;
+ const child_size = ZigType.fromInterned(child_ty).abiSize(zcu);
+ if (offset == child_size and size == 1) {
+ offset = 0;
+ continue :constant_key .{ .simple_value = switch (opt.val) {
+ .none => .false,
+ else => .true,
+ } };
+ }
+ const opt_ty: ZigType = .fromInterned(opt.ty);
+ if (offset + size <= child_size) continue :constant_key switch (opt.val) {
+ .none => if (opt_ty.optionalReprIsPayload(zcu)) .{ .int = .{
+ .ty = opt.ty,
+ .storage = .{ .u64 = 0 },
+ } } else .{ .undef = child_ty },
+ else => |child| {
+ constant = child;
+ constant_key = ip.indexToKey(child);
+ continue :constant_key constant_key;
+ },
+ };
+ },
+ .aggregate => |aggregate| switch (ip.indexToKey(aggregate.ty)) {
+ else => unreachable,
+ .array_type => |array_type| {
+ const elem_size = ZigType.fromInterned(array_type.child).abiSize(zcu);
+ const elem_offset = @mod(offset, elem_size);
+ if (size <= elem_size - elem_offset) {
+ defer offset = elem_offset;
+ continue :constant_key switch (aggregate.storage) {
+ .bytes => |bytes| .{ .int = .{ .ty = .u8_type, .storage = .{
+ .u64 = bytes.toSlice(array_type.lenIncludingSentinel(), ip)[@intCast(@divFloor(offset, elem_size))],
+ } } },
+ .elems => |elems| {
+ constant = elems[@intCast(@divFloor(offset, elem_size))];
+ constant_key = ip.indexToKey(constant);
+ continue :constant_key constant_key;
+ },
+ .repeated_elem => |repeated_elem| {
+ constant = repeated_elem;
+ constant_key = ip.indexToKey(repeated_elem);
+ continue :constant_key constant_key;
+ },
+ };
+ }
+ },
+ .vector_type => {},
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(aggregate.ty);
+ switch (loaded_struct.layout) {
+ .auto => {
+ var field_offset: u64 = 0;
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ if (loaded_struct.fieldIsComptime(ip, field_index)) continue;
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ field_offset = field_ty.structFieldAlignment(
+ loaded_struct.fieldAlign(ip, field_index),
+ loaded_struct.layout,
+ zcu,
+ ).forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (offset >= field_offset and offset + size <= field_offset + field_size) {
+ offset -= field_offset;
+ constant = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ };
+ constant_key = ip.indexToKey(constant);
+ continue :constant_key constant_key;
+ }
+ field_offset += field_size;
+ }
+ },
+ .@"extern", .@"packed" => {},
+ }
+ },
+ .tuple_type => |tuple_type| {
+ var field_offset: u64 = 0;
+ for (tuple_type.types.get(ip), tuple_type.values.get(ip), 0..) |field_type, field_value, field_index| {
+ if (field_value != .none) continue;
+ const field_ty: ZigType = .fromInterned(field_type);
+ field_offset = field_ty.abiAlignment(zcu).forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (offset >= field_offset and offset + size <= field_offset + field_size) {
+ offset -= field_offset;
+ constant = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ };
+ constant_key = ip.indexToKey(constant);
+ continue :constant_key constant_key;
+ }
+ field_offset += field_size;
+ }
+ },
+ },
+ else => {},
+ }
+ var buffer: [16]u8 = @splat(0);
+ if (ZigType.fromInterned(constant_key.typeOf()).abiSize(zcu) <= buffer.len and
+ try isel.writeToMemory(.fromInterned(constant), &buffer))
+ {
+ constant_key = if (mat.ra.isVector()) .{ .float = switch (size) {
+ else => unreachable,
+ 2 => .{ .ty = .f16_type, .storage = .{ .f16 = @bitCast(std.mem.readInt(
+ u16,
+ buffer[@intCast(offset)..][0..2],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ 4 => .{ .ty = .f32_type, .storage = .{ .f32 = @bitCast(std.mem.readInt(
+ u32,
+ buffer[@intCast(offset)..][0..4],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ 8 => .{ .ty = .f64_type, .storage = .{ .f64 = @bitCast(std.mem.readInt(
+ u64,
+ buffer[@intCast(offset)..][0..8],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ 16 => .{ .ty = .f128_type, .storage = .{ .f128 = @bitCast(std.mem.readInt(
+ u128,
+ buffer[@intCast(offset)..][0..16],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ } } else .{ .int = .{
+ .ty = .u64_type,
+ .storage = .{ .u64 = switch (size) {
+ else => unreachable,
+ inline 1...8 => |ct_size| std.mem.readInt(
+ @Type(.{ .int = .{ .signedness = .unsigned, .bits = 8 * ct_size } }),
+ buffer[@intCast(offset)..][0..ct_size],
+ isel.target.cpu.arch.endian(),
+ ),
+ } },
+ } };
+ offset = 0;
+ continue;
+ }
+ return isel.fail("unsupported value <{f}, {f}>", .{
+ isel.fmtType(.fromInterned(constant_key.typeOf())),
+ isel.fmtConstant(.fromInterned(constant)),
+ });
+ }
+ },
+ }
+ }
+ live_vi.* = .free;
+ }
+ };
+};
/// Creates a fresh, unallocated tracked value whose size and alignment are
/// taken from `ty`'s ABI layout. Capacity in `isel.values` must already have
/// been reserved by the caller.
fn initValue(isel: *Select, ty: ZigType) Value.Index {
    const zcu = isel.pt.zcu;
    const alignment = ty.abiAlignment(zcu);
    const size = ty.abiSize(zcu);
    return isel.initValueAdvanced(alignment, 0, size);
}
/// Appends a new, unallocated value record to `isel.values` and returns its
/// index. Capacity must already be reserved (`addOneAssumeCapacity`).
///
/// The effective alignment is capped by the offset within the parent: the
/// value can only be as aligned as `offset_from_parent`'s trailing zero bits
/// allow. Values larger than 16 bytes use the `.large` location payload,
/// which cannot live in a single register.
fn initValueAdvanced(
    isel: *Select,
    parent_alignment: InternPool.Alignment,
    offset_from_parent: u64,
    size: u64,
) Value.Index {
    const is_large = size > 16;
    const index = isel.values.items.len;
    isel.values.addOneAssumeCapacity().* = .{
        .refs = 0,
        .flags = .{
            // @ctz(offset) bounds how aligned this sub-value can be.
            .alignment = .fromLog2Units(@min(parent_alignment.toLog2Units(), @ctz(offset_from_parent))),
            .parent_tag = .unallocated,
            .location_tag = if (is_large) .large else .small,
            .parts_len_minus_one = 0,
        },
        .offset_from_parent = offset_from_parent,
        .parent_payload = .{ .unallocated = {} },
        .location_payload = if (is_large) .{ .large = .{
            .size = size,
        } } else .{ .small = .{
            .size = @intCast(size),
            .signedness = .unsigned,
            .is_vector = false,
            .hint = .zr,
            .register = .zr,
        } },
        .parts = undefined,
    };
    return @enumFromInt(index);
}
/// Debug helper: dumps the current value-tracking state to stderr as a tree.
/// `which` selects between dumping every value or only values whose reference
/// count is nonzero.
pub fn dumpValues(isel: *Select, which: enum { only_referenced, all }) void {
    // This is debugging-only code; any error while dumping is fatal.
    errdefer |err| @panic(@errorName(err));
    const stderr = std.debug.lockStderrWriter(&.{});
    defer std.debug.unlockStderrWriter();

    const zcu = isel.pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(isel.nav_index);

    // Invert `live_values`: value index -> list of AIR instructions bound to it.
    var reverse_live_values: std.AutoArrayHashMapUnmanaged(Value.Index, std.ArrayListUnmanaged(Air.Inst.Index)) = .empty;
    defer {
        for (reverse_live_values.values()) |*list| list.deinit(gpa);
        reverse_live_values.deinit(gpa);
    }
    {
        try reverse_live_values.ensureTotalCapacity(gpa, isel.live_values.count());
        var live_val_it = isel.live_values.iterator();
        while (live_val_it.next()) |live_val_entry| switch (live_val_entry.value_ptr.*) {
            _ => {
                const gop = reverse_live_values.getOrPutAssumeCapacity(live_val_entry.value_ptr.*);
                if (!gop.found_existing) gop.value_ptr.* = .empty;
                try gop.value_ptr.append(gpa, live_val_entry.key_ptr.*);
            },
            // `live_values` must only ever hold real value indices.
            .allocating, .free => unreachable,
        };
    }

    // Invert `live_registers`: value index -> register currently holding it.
    var reverse_live_registers: std.AutoHashMapUnmanaged(Value.Index, Register.Alias) = .empty;
    defer reverse_live_registers.deinit(gpa);
    {
        try reverse_live_registers.ensureTotalCapacity(gpa, @typeInfo(Register.Alias).@"enum".fields.len);
        var live_reg_it = isel.live_registers.iterator();
        while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
            _ => reverse_live_registers.putAssumeCapacityNoClobber(live_reg_entry.value.*, live_reg_entry.key),
            .allocating, .free => {},
        };
    }

    // Collect root values by walking each value's parent chain up to its
    // allocation root (or the value providing its address). The map value is
    // the indentation depth used while printing (0 for roots).
    var roots: std.AutoArrayHashMapUnmanaged(Value.Index, u32) = .empty;
    defer roots.deinit(gpa);
    {
        try roots.ensureTotalCapacity(gpa, isel.values.items.len);
        var vi: Value.Index = @enumFromInt(isel.values.items.len);
        while (@intFromEnum(vi) > 0) {
            vi = @enumFromInt(@intFromEnum(vi) - 1);
            if (which == .only_referenced and vi.get(isel).refs == 0) continue;
            while (true) switch (vi.parent(isel)) {
                .unallocated, .stack_slot, .constant => break,
                .value => |parent_vi| vi = parent_vi,
                .address => |address_vi| break roots.putAssumeCapacity(address_vi, 0),
            };
            roots.putAssumeCapacity(vi, 0);
        }
    }

    try stderr.print("# Begin {s} Value Dump: {f}:\n", .{ @typeName(Select), nav.fqn.fmt(ip) });
    // Depth-first dump: each value's parts are queued at depth + 1 below.
    while (roots.pop()) |root_entry| {
        const vi = root_entry.key;
        const value = vi.get(isel);
        // Indent by tree depth (stored in the map value).
        try stderr.splatByteAll(' ', 2 * (@as(usize, 1) + root_entry.value));
        try stderr.print("${d}", .{@intFromEnum(vi)});
        {
            // Print the AIR instructions and register referring to this value.
            var first = true;
            if (reverse_live_values.get(vi)) |aiis| for (aiis.items) |aii| {
                if (aii == Block.main) {
                    try stderr.print("{s}%main", .{if (first) " <- " else ", "});
                } else {
                    try stderr.print("{s}%{d}", .{ if (first) " <- " else ", ", @intFromEnum(aii) });
                }
                first = false;
            };
            if (reverse_live_registers.get(vi)) |ra| {
                try stderr.print("{s}{s}", .{ if (first) " <- " else ", ", @tagName(ra) });
                first = false;
            }
        }
        try stderr.writeByte(':');
        // Describe where the value lives relative to its parent.
        switch (value.flags.parent_tag) {
            .unallocated => if (value.offset_from_parent != 0) try stderr.print(" +0x{x}", .{value.offset_from_parent}),
            .stack_slot => {
                try stderr.print(" [{s}, #{s}0x{x}", .{
                    @tagName(value.parent_payload.stack_slot.base),
                    if (value.parent_payload.stack_slot.offset < 0) "-" else "",
                    @abs(value.parent_payload.stack_slot.offset),
                });
                if (value.offset_from_parent != 0) try stderr.print("+0x{x}", .{value.offset_from_parent});
                try stderr.writeByte(']');
            },
            .value => try stderr.print(" ${d}+0x{x}", .{ @intFromEnum(value.parent_payload.value), value.offset_from_parent }),
            .address => try stderr.print(" ${d}[0x{x}]", .{ @intFromEnum(value.parent_payload.address), value.offset_from_parent }),
            .constant => try stderr.print(" <{f}, {f}>", .{
                isel.fmtType(value.parent_payload.constant.typeOf(zcu)),
                isel.fmtConstant(value.parent_payload.constant),
            }),
        }
        try stderr.print(" align({s})", .{@tagName(value.flags.alignment)});
        switch (value.flags.location_tag) {
            .large => try stderr.print(" size=0x{x} large", .{value.location_payload.large.size}),
            .small => {
                const loc = value.location_payload.small;
                try stderr.print(" size=0x{x}", .{loc.size});
                switch (loc.signedness) {
                    .unsigned => {},
                    .signed => try stderr.writeAll(" signed"),
                }
                if (loc.hint != .zr) try stderr.print(" hint={s}", .{@tagName(loc.hint)});
                if (loc.register != .zr) try stderr.print(" loc={s}", .{@tagName(loc.register)});
            },
        }
        try stderr.print(" refs={d}\n", .{value.refs});

        // Queue this value's parts (indices parts_len_minus_one..0) one level
        // deeper; a lone part (parts_len_minus_one == 0) is not expanded.
        var part_index = value.flags.parts_len_minus_one;
        if (part_index > 0) while (true) : (part_index -= 1) {
            roots.putAssumeCapacityNoClobber(
                @enumFromInt(@intFromEnum(value.parts) + part_index),
                root_entry.value + 1,
            );
            if (part_index == 0) break;
        };
    }
    try stderr.print("# End {s} Value Dump: {f}\n\n", .{ @typeName(Select), nav.fqn.fmt(ip) });
}
+
/// If every byte of `constant`'s in-memory representation is the same, returns
/// that byte; otherwise (or when the value cannot be serialized) returns null.
/// NOTE: indexes byte 0, so callers presumably never pass zero-bit constants.
fn hasRepeatedByteRepr(isel: *Select, constant: Constant) error{OutOfMemory}!?u8 {
    const zcu = isel.pt.zcu;
    const ty = constant.typeOf(zcu);
    const abi_size = std.math.cast(usize, ty.abiSize(zcu)) orelse return null;
    const bytes = try zcu.gpa.alloc(u8, abi_size);
    defer zcu.gpa.free(bytes);
    if (!try isel.writeToMemory(constant, bytes)) return null;
    const first = bytes[0];
    for (bytes[1..]) |byte| if (byte != first) return null;
    return first;
}
+
/// Serializes `constant` into `buffer`. Returns false when neither this
/// backend's key-based serializer nor the generic `Constant.writeToMemory`
/// can produce a well-defined byte representation.
fn writeToMemory(isel: *Select, constant: Constant, buffer: []u8) error{OutOfMemory}!bool {
    const zcu = isel.pt.zcu;
    const ip = &zcu.intern_pool;
    // Try the backend's own serializer first.
    if (try isel.writeKeyToMemory(ip.indexToKey(constant.toIntern()), buffer)) return true;
    // Fall back to the generic serializer, mapping its failures to `false`.
    if (constant.writeToMemory(isel.pt, buffer)) |_| {
        return true;
    } else |err| return switch (err) {
        error.OutOfMemory => error.OutOfMemory,
        error.ReinterpretDeclRef, error.Unimplemented, error.IllDefinedMemoryLayout => false,
    };
}
/// Serializes `constant_key` into `buffer` using this backend's own layout
/// rules. Returns false for forms it does not handle, letting the caller fall
/// back to the generic `Constant.writeToMemory`.
fn writeKeyToMemory(isel: *Select, constant_key: InternPool.Key, buffer: []u8) error{OutOfMemory}!bool {
    const zcu = isel.pt.zcu;
    const ip = &zcu.intern_pool;
    switch (constant_key) {
        .int_type,
        .ptr_type,
        .array_type,
        .vector_type,
        .opt_type,
        .anyframe_type,
        .error_union_type,
        .simple_type,
        .struct_type,
        .tuple_type,
        .union_type,
        .opaque_type,
        .enum_type,
        .func_type,
        .error_set_type,
        .inferred_error_set_type,

        .enum_literal,
        .empty_enum_value,
        .memoized_call,
        => unreachable, // not a runtime value
        .err => |err| {
            // An error value is its global error integer, written with the
            // target's endianness and sized to fill the whole buffer.
            const error_int = ip.getErrorValueIfExists(err.name).?;
            switch (buffer.len) {
                else => unreachable,
                inline 1...4 => |size| std.mem.writeInt(
                    @Type(.{ .int = .{ .signedness = .unsigned, .bits = 8 * size } }),
                    buffer[0..size],
                    @intCast(error_int),
                    isel.target.cpu.arch.endian(),
                ),
            }
        },
        .error_union => |error_union| {
            const error_union_type = ip.indexToKey(error_union.ty).error_union_type;
            const error_set_ty: ZigType = .fromInterned(error_union_type.error_set_type);
            const payload_ty: ZigType = .fromInterned(error_union_type.payload_type);
            // Sub-slice of the buffer holding the error code.
            const error_set = buffer[@intCast(codegen.errUnionErrorOffset(payload_ty, zcu))..][0..@intCast(error_set_ty.abiSize(zcu))];
            switch (error_union.val) {
                .err_name => |err_name| if (!try isel.writeKeyToMemory(.{ .err = .{
                    .ty = error_set_ty.toIntern(),
                    .name = err_name,
                } }, error_set)) return false,
                .payload => |payload| {
                    if (!try isel.writeToMemory(
                        .fromInterned(payload),
                        buffer[@intCast(codegen.errUnionPayloadOffset(payload_ty, zcu))..][0..@intCast(payload_ty.abiSize(zcu))],
                    )) return false;
                    // A present payload means the error code is zero.
                    @memset(error_set, 0);
                },
            }
        },
        .opt => |opt| {
            const child_size: usize = @intCast(ZigType.fromInterned(ip.indexToKey(opt.ty).opt_type).abiSize(zcu));
            switch (opt.val) {
                // NOTE(review): for the non-payload representation, only the
                // has-value byte is written here; the payload bytes are left
                // untouched (the payload is undefined for `.none`) — confirm
                // callers pre-initialize the buffer before comparing its bytes.
                .none => if (!ZigType.fromInterned(opt.ty).optionalReprIsPayload(zcu)) {
                    buffer[child_size] = @intFromBool(false);
                } else @memset(buffer[0..child_size], 0x00),
                else => |child_constant| {
                    if (!try isel.writeToMemory(.fromInterned(child_constant), buffer[0..child_size])) return false;
                    if (!ZigType.fromInterned(opt.ty).optionalReprIsPayload(zcu)) buffer[child_size] = @intFromBool(true);
                },
            }
        },
        .aggregate => |aggregate| switch (ip.indexToKey(aggregate.ty)) {
            else => unreachable,
            .array_type => |array_type| {
                var elem_offset: usize = 0;
                const elem_size: usize = @intCast(ZigType.fromInterned(array_type.child).abiSize(zcu));
                const len_including_sentinel: usize = @intCast(array_type.lenIncludingSentinel());
                switch (aggregate.storage) {
                    .bytes => |bytes| @memcpy(buffer[0..len_including_sentinel], bytes.toSlice(len_including_sentinel, ip)),
                    .elems => |elems| for (elems) |elem| {
                        if (!try isel.writeToMemory(.fromInterned(elem), buffer[elem_offset..][0..elem_size])) return false;
                        elem_offset += elem_size;
                    },
                    .repeated_elem => |repeated_elem| for (0..len_including_sentinel) |_| {
                        if (!try isel.writeToMemory(.fromInterned(repeated_elem), buffer[elem_offset..][0..elem_size])) return false;
                        elem_offset += elem_size;
                    },
                }
            },
            .vector_type => return false,
            .struct_type => {
                const loaded_struct = ip.loadStructType(aggregate.ty);
                switch (loaded_struct.layout) {
                    .auto => {
                        // Walk fields in runtime order, aligning each field's
                        // offset exactly as the struct layout code does.
                        var field_offset: u64 = 0;
                        var field_it = loaded_struct.iterateRuntimeOrder(ip);
                        while (field_it.next()) |field_index| {
                            if (loaded_struct.fieldIsComptime(ip, field_index)) continue;
                            const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
                            field_offset = field_ty.structFieldAlignment(
                                loaded_struct.fieldAlign(ip, field_index),
                                loaded_struct.layout,
                                zcu,
                            ).forward(field_offset);
                            const field_size = field_ty.abiSize(zcu);
                            if (!try isel.writeToMemory(.fromInterned(switch (aggregate.storage) {
                                .bytes => unreachable,
                                .elems => |elems| elems[field_index],
                                .repeated_elem => |repeated_elem| repeated_elem,
                            }), buffer[@intCast(field_offset)..][0..@intCast(field_size)])) return false;
                            field_offset += field_size;
                        }
                    },
                    .@"extern", .@"packed" => return false,
                }
            },
            .tuple_type => |tuple_type| {
                var field_offset: u64 = 0;
                for (tuple_type.types.get(ip), tuple_type.values.get(ip), 0..) |field_type, field_value, field_index| {
                    // Comptime-known fields occupy no runtime storage.
                    if (field_value != .none) continue;
                    const field_ty: ZigType = .fromInterned(field_type);
                    field_offset = field_ty.abiAlignment(zcu).forward(field_offset);
                    const field_size = field_ty.abiSize(zcu);
                    if (!try isel.writeToMemory(.fromInterned(switch (aggregate.storage) {
                        .bytes => unreachable,
                        .elems => |elems| elems[field_index],
                        .repeated_elem => |repeated_elem| repeated_elem,
                    }), buffer[@intCast(field_offset)..][0..@intCast(field_size)])) return false;
                    field_offset += field_size;
                }
            },
        },
        else => return false,
    }
    return true;
}
+
/// Outcome of a non-failing register-allocation attempt.
const TryAllocRegResult = union(enum) {
    /// A free register was found; it has been marked `.allocating` and added
    /// to `saved_registers`.
    allocated: Register.Alias,
    /// No register was free; this one holds a live value that could be
    /// evicted via `fillMemory`.
    fill_candidate: Register.Alias,
    /// Every candidate register is currently reserved (`.allocating`).
    out_of_registers,
};
+
/// Scans r0..lr for a free general-purpose register. On success the register
/// is reserved (`.allocating`) and recorded in `saved_registers`; otherwise
/// the first spillable occupant is reported, or `.out_of_registers` when
/// every candidate is already reserved.
fn tryAllocIntReg(isel: *Select) TryAllocRegResult {
    var fallback: TryAllocRegResult = .out_of_registers;
    var ra: Register.Alias = .r0;
    while (true) : (ra = @enumFromInt(@intFromEnum(ra) + 1)) {
        // r18 is the platform register and fp is the frame pointer; neither
        // is available for allocation.
        if (ra == .r18 or ra == Register.Alias.fp) continue;
        const slot = isel.live_registers.getPtr(ra);
        switch (slot.*) {
            .free => {
                slot.* = .allocating;
                isel.saved_registers.insert(ra);
                return .{ .allocated = ra };
            },
            .allocating => {},
            // Occupied by a live value: remember the first such register as a
            // spill candidate.
            _ => if (fallback == .out_of_registers) {
                fallback = .{ .fill_candidate = ra };
            },
        }
        // lr (r30) is the last integer register considered.
        if (ra == Register.Alias.lr) return fallback;
    }
}
+
/// Allocates a general-purpose register, evicting a live value to memory if
/// no register is free. Fails codegen when every register is reserved.
fn allocIntReg(isel: *Select) !Register.Alias {
    switch (isel.tryAllocIntReg()) {
        .allocated => |ra| return ra,
        .fill_candidate => |ra| {
            // Evict the current occupant, then claim the now-free register.
            assert(try isel.fillMemory(ra));
            const slot = isel.live_registers.getPtr(ra);
            assert(slot.* == .free);
            slot.* = .allocating;
            return ra;
        },
        .out_of_registers => return isel.fail("ran out of registers", .{}),
    }
}
+
/// Scans v0..v31 for a free vector register. On success the register is
/// reserved (`.allocating`) and recorded in `saved_registers`; otherwise the
/// first spillable occupant is reported, or `.out_of_registers` when every
/// candidate is already reserved.
fn tryAllocVecReg(isel: *Select) TryAllocRegResult {
    var fallback: TryAllocRegResult = .out_of_registers;
    var ra: Register.Alias = .v0;
    while (true) : (ra = @enumFromInt(@intFromEnum(ra) + 1)) {
        const slot = isel.live_registers.getPtr(ra);
        switch (slot.*) {
            .free => {
                slot.* = .allocating;
                isel.saved_registers.insert(ra);
                return .{ .allocated = ra };
            },
            .allocating => {},
            // Occupied by a live value: remember the first such register as a
            // spill candidate.
            _ => if (fallback == .out_of_registers) {
                fallback = .{ .fill_candidate = ra };
            },
        }
        // v31 is the last vector register considered.
        if (ra == Register.Alias.v31) return fallback;
    }
}
+
/// Allocates a vector register, evicting a live value to memory if no
/// register is free. Fails codegen when every register is reserved.
/// NOTE(review): unlike `allocIntReg`, the `fill_candidate` path does not mark
/// the freed register `.allocating` before returning — confirm that callers
/// reserve it themselves, otherwise a nested allocation could reclaim it.
fn allocVecReg(isel: *Select) !Register.Alias {
    switch (isel.tryAllocVecReg()) {
        .allocated => |ra| return ra,
        .fill_candidate => |ra| {
            assert(try isel.fillMemory(ra));
            return ra;
        },
        .out_of_registers => return isel.fail("ran out of registers", .{}),
    }
}
+
/// Guard for a register held in the `.allocating` state; releasing it hands
/// the register back via `freeReg`.
const RegLock = struct {
    ra: Register.Alias,
    /// A lock guarding nothing; `unlock` is a no-op on it.
    const empty: RegLock = .{ .ra = .zr };
    /// Releases the locked register, if any.
    fn unlock(lock: RegLock, isel: *Select) void {
        if (lock.ra == .zr) return;
        isel.freeReg(lock.ra);
    }
};
/// Reserves `ra`, which must currently be free, and returns a lock for it.
fn lockReg(isel: *Select, ra: Register.Alias) RegLock {
    assert(ra != .zr);
    const slot = isel.live_registers.getPtr(ra);
    assert(slot.* == .free);
    slot.* = .allocating;
    return .{ .ra = ra };
}
/// Reserves `ra` if it is free, returning a lock for it; returns the empty
/// lock when the register is already reserved. `ra` must not be tracking a
/// live value.
fn tryLockReg(isel: *Select, ra: Register.Alias) RegLock {
    assert(ra != .zr);
    const slot = isel.live_registers.getPtr(ra);
    switch (slot.*) {
        .free => {
            slot.* = .allocating;
            return .{ .ra = ra };
        },
        .allocating => return .empty,
        _ => unreachable,
    }
}
+
/// Returns `ra` from the reserved (`.allocating`) state back to `.free`.
fn freeReg(isel: *Select, ra: Register.Alias) void {
    assert(ra != .zr);
    const slot = isel.live_registers.getPtr(ra);
    assert(slot.* == .allocating);
    slot.* = .free;
}
+
/// Returns the tracked value for an AIR reference, creating it on first use.
/// AIR instruction results are cached in `live_values` (with a reference
/// added); interned constants get a fresh value parented on the constant.
fn use(isel: *Select, air_ref: Air.Inst.Ref) !Value.Index {
    const zcu = isel.pt.zcu;
    const ip = &zcu.intern_pool;
    // Reserve capacity up front so `initValue` below cannot fail.
    try isel.values.ensureUnusedCapacity(zcu.gpa, 1);
    const vi, const ty = if (air_ref.toIndex()) |air_inst_index| vi_ty: {
        const live_gop = try isel.live_values.getOrPut(zcu.gpa, air_inst_index);
        // Already tracked: hand back the existing value as-is.
        if (live_gop.found_existing) return live_gop.value_ptr.*;
        const ty = isel.air.typeOf(air_ref, ip);
        const vi = isel.initValue(ty);
        tracking_log.debug("${d} <- %{d}", .{
            @intFromEnum(vi),
            @intFromEnum(air_inst_index),
        });
        live_gop.value_ptr.* = vi.ref(isel);
        break :vi_ty .{ vi, ty };
    } else vi_ty: {
        const constant: Constant = .fromInterned(air_ref.toInterned().?);
        const ty = constant.typeOf(zcu);
        const vi = isel.initValue(ty);
        tracking_log.debug("${d} <- <{f}, {f}>", .{
            @intFromEnum(vi),
            isel.fmtType(ty),
            isel.fmtConstant(constant),
        });
        vi.setParent(isel, .{ .constant = constant });
        break :vi_ty .{ vi, ty };
    };
    // Small ABI integers (<= 16 bits) record their signedness; small
    // homogeneous aggregates are tagged as vector-resident.
    if (ty.isAbiInt(zcu)) {
        const int_info = ty.intInfo(zcu);
        if (int_info.bits <= 16) vi.setSignedness(isel, int_info.signedness);
    } else if (vi.size(isel) <= 16 and
        CallAbiIterator.homogeneousAggregateBaseType(zcu, ty.toIntern()) != null) vi.setIsVector(isel);
    return vi;
}
+
/// Vacates `dst_ra` by moving its tracked value into another register —
/// preferring the value's hint register — or, failing that, by evicting it to
/// memory via `fillMemory`. Returns false when `dst_ra` cannot be vacated
/// (untracked special register, or currently reserved).
fn fill(isel: *Select, dst_ra: Register.Alias) error{ OutOfMemory, CodegenFail }!bool {
    switch (dst_ra) {
        else => {},
        // Special registers never track values; there is nothing to move.
        Register.Alias.fp, .zr, .sp, .pc, .fpcr, .fpsr, .ffr => return false,
    }
    const dst_live_vi = isel.live_registers.getPtr(dst_ra);
    const dst_vi = switch (dst_live_vi.*) {
        _ => |dst_vi| dst_vi,
        .allocating => return false,
        .free => return true,
    };
    const src_ra = src_ra: {
        if (dst_vi.hint(isel)) |hint_ra| {
            assert(dst_live_vi.* == dst_vi);
            // Temporarily reserve dst_ra so the recursive fill can't pick it.
            dst_live_vi.* = .allocating;
            defer dst_live_vi.* = dst_vi;
            if (try isel.fill(hint_ra)) {
                isel.saved_registers.insert(hint_ra);
                break :src_ra hint_ra;
            }
        }
        switch (if (dst_vi.isVector(isel)) isel.tryAllocVecReg() else isel.tryAllocIntReg()) {
            .allocated => |ra| break :src_ra ra,
            // No free register available: evict the value to memory instead.
            .fill_candidate, .out_of_registers => return isel.fillMemory(dst_ra),
        }
    };
    // Rebind the value to its new register and update both tracking tables.
    try dst_vi.liveIn(isel, src_ra, comptime &.initFill(.free));
    const src_live_vi = isel.live_registers.getPtr(src_ra);
    assert(src_live_vi.* == .allocating);
    src_live_vi.* = dst_vi;
    return true;
}
+
/// Vacates `dst_ra` by making its tracked value memory-resident: emits a load
/// of the value through a base register and retags the value as living at
/// that address. Returns false when `dst_ra` holds no evictable value.
/// NOTE(review): the emitted instruction is a load, so this presumably relies
/// on instructions being emitted in reverse program order (the matching store
/// appears when the address is materialized) — confirm against `emit`.
fn fillMemory(isel: *Select, dst_ra: Register.Alias) error{ OutOfMemory, CodegenFail }!bool {
    const dst_live_vi = isel.live_registers.getPtr(dst_ra);
    const dst_vi = switch (dst_live_vi.*) {
        _ => |dst_vi| dst_vi,
        .allocating => return false,
        .free => return true,
    };
    const dst_vi_ra = &dst_vi.get(isel).location_payload.small.register;
    assert(dst_vi_ra.* == dst_ra);
    // A vector register can't hold the address; use a separate integer base.
    const base_ra = if (dst_ra.isVector()) try isel.allocIntReg() else dst_ra;
    defer if (base_ra != dst_ra) isel.freeReg(base_ra);
    // Pick the load variant by value size, signedness, and register class.
    try isel.emit(switch (dst_vi.size(isel)) {
        else => unreachable,
        1 => if (dst_ra.isVector())
            .ldr(dst_ra.b(), .{ .base = base_ra.x() })
        else switch (dst_vi.signedness(isel)) {
            .signed => .ldrsb(dst_ra.w(), .{ .base = base_ra.x() }),
            .unsigned => .ldrb(dst_ra.w(), .{ .base = base_ra.x() }),
        },
        2 => if (dst_ra.isVector())
            .ldr(dst_ra.h(), .{ .base = base_ra.x() })
        else switch (dst_vi.signedness(isel)) {
            .signed => .ldrsh(dst_ra.w(), .{ .base = base_ra.x() }),
            .unsigned => .ldrh(dst_ra.w(), .{ .base = base_ra.x() }),
        },
        4 => .ldr(if (dst_ra.isVector()) dst_ra.s() else dst_ra.w(), .{ .base = base_ra.x() }),
        8 => .ldr(if (dst_ra.isVector()) dst_ra.d() else dst_ra.x(), .{ .base = base_ra.x() }),
        16 => .ldr(dst_ra.q(), .{ .base = base_ra.x() }),
    });
    // The value no longer lives in a register; it lives at [base_ra].
    dst_vi_ra.* = .zr;
    try dst_vi.address(isel, 0, base_ra);
    dst_live_vi.* = .free;
    return true;
}
+
/// Merges possibly differing value tracking into a consistent state.
///
/// At a conditional branch, if a value is expected in the same register on both
/// paths, or only expected in a register on only one path, tracking is updated:
///
/// $0 -> r0 // final state is now consistent with both paths
/// b.cond else
/// then:
/// $0 -> r0 // updated if not already consistent with else
/// ...
/// b end
/// else:
/// $0 -> r0
/// ...
/// end:
///
/// At a conditional branch, if a value is expected in different registers on
/// each path, mov instructions are emitted:
///
/// $0 -> r0 // final state is now consistent with both paths
/// b.cond else
/// then:
/// $0 -> r0 // updated to be consistent with else
/// mov x1, x0 // emitted to merge the inconsistent states
/// $0 -> r1
/// ...
/// b end
/// else:
/// $0 -> r0
/// ...
/// end:
///
/// At a loop, a value that is expected in a register at the repeats is updated:
///
/// $0 -> r0 // final state is now consistent with all paths
/// loop:
/// $0 -> r0 // updated to be consistent with the repeats
/// ...
/// $0 -> r0
/// b.cond loop
/// ...
/// $0 -> r0
/// b loop
///
/// At a loop, a value that is expected in a register at the top is filled:
///
/// $0 -> [sp, #A] // final state is now consistent with all paths
/// loop:
/// $0 -> [sp, #A] // updated to be consistent with the repeats
/// ldr x0, [sp, #A] // emitted to merge the inconsistent states
/// $0 -> r0
/// ...
/// $0 -> [sp, #A]
/// b.cond loop
/// ...
/// $0 -> [sp, #A]
/// b loop
///
/// At a loop, if a value that is expected in different registers on each path,
/// mov instructions are emitted:
///
/// $0 -> r0 // final state is now consistent with all paths
/// loop:
/// $0 -> r0 // updated to be consistent with the repeats
/// mov x1, x0 // emitted to merge the inconsistent states
/// $0 -> r1
/// ...
/// $0 -> r0
/// b.cond loop
/// ...
/// $0 -> r0
/// b loop
fn merge(
    isel: *Select,
    expected_live_registers: *const LiveRegisters,
    comptime opts: struct { fill_extra: bool = false },
) !void {
    // Pass 1: reserve every free register that the expected state needs, so
    // the fills performed in pass 2 cannot allocate it for something else.
    var live_reg_it = isel.live_registers.iterator();
    while (live_reg_it.next()) |live_reg_entry| {
        const ra = live_reg_entry.key;
        const actual_vi = live_reg_entry.value;
        const expected_vi = expected_live_registers.get(ra);
        switch (expected_vi) {
            else => switch (actual_vi.*) {
                _ => {},
                .allocating => unreachable,
                .free => actual_vi.* = .allocating,
            },
            .free => {},
        }
    }
    // Pass 2: make each register consistent with the expected state, evicting
    // (`fill_extra`) or moving values and emitting the necessary loads/moves.
    live_reg_it = isel.live_registers.iterator();
    while (live_reg_it.next()) |live_reg_entry| {
        const ra = live_reg_entry.key;
        const actual_vi = live_reg_entry.value;
        const expected_vi = expected_live_registers.get(ra);
        switch (expected_vi) {
            _ => {
                switch (actual_vi.*) {
                    _ => _ = if (opts.fill_extra) {
                        assert(try isel.fillMemory(ra));
                        assert(actual_vi.* == .free);
                    },
                    .allocating => actual_vi.* = .free,
                    .free => unreachable,
                }
                try expected_vi.liveIn(isel, ra, expected_live_registers);
            },
            .allocating => if (if (opts.fill_extra) try isel.fillMemory(ra) else try isel.fill(ra)) {
                assert(actual_vi.* == .free);
                actual_vi.* = .allocating;
            },
            .free => if (opts.fill_extra) assert(try isel.fillMemory(ra) and actual_vi.* == .free),
        }
    }
    // Pass 3: commit the expected value assignments and sanity-check that the
    // final state matches `expected_live_registers`.
    live_reg_it = isel.live_registers.iterator();
    while (live_reg_it.next()) |live_reg_entry| {
        const ra = live_reg_entry.key;
        const actual_vi = live_reg_entry.value;
        const expected_vi = expected_live_registers.get(ra);
        switch (expected_vi) {
            _ => {
                assert(actual_vi.* == .allocating and expected_vi.register(isel) == ra);
                actual_vi.* = expected_vi;
            },
            .allocating => assert(actual_vi.* == .allocating),
            .free => if (opts.fill_extra) assert(actual_vi.* == .free),
        }
    }
}
+
/// Helpers for modeling the calling convention while lowering a call.
const call = struct {
    // Sentinel value indices (just below `Value.Index.allocating`) used only
    // to classify registers in `caller_saved_regs` below.
    const param_reg: Value.Index = @enumFromInt(@intFromEnum(Value.Index.allocating) - 2);
    const callee_clobbered_reg: Value.Index = @enumFromInt(@intFromEnum(Value.Index.allocating) - 1);
    /// Classification of every register at a call site: parameter/result
    /// registers, registers the callee may clobber, and registers left alone
    /// (`.free`). The split (r0-r8/v0-v7 parameters, r19-r28 and v8-v15
    /// untouched) follows the AArch64 procedure call standard.
    const caller_saved_regs: LiveRegisters = .init(.{
        .r0 = param_reg,
        .r1 = param_reg,
        .r2 = param_reg,
        .r3 = param_reg,
        .r4 = param_reg,
        .r5 = param_reg,
        .r6 = param_reg,
        .r7 = param_reg,
        .r8 = param_reg,
        .r9 = callee_clobbered_reg,
        .r10 = callee_clobbered_reg,
        .r11 = callee_clobbered_reg,
        .r12 = callee_clobbered_reg,
        .r13 = callee_clobbered_reg,
        .r14 = callee_clobbered_reg,
        .r15 = callee_clobbered_reg,
        .r16 = callee_clobbered_reg,
        .r17 = callee_clobbered_reg,
        .r18 = callee_clobbered_reg,
        .r19 = .free,
        .r20 = .free,
        .r21 = .free,
        .r22 = .free,
        .r23 = .free,
        .r24 = .free,
        .r25 = .free,
        .r26 = .free,
        .r27 = .free,
        .r28 = .free,
        .r29 = .free,
        .r30 = callee_clobbered_reg,
        .zr = .free,
        .sp = .free,

        .pc = .free,

        .v0 = param_reg,
        .v1 = param_reg,
        .v2 = param_reg,
        .v3 = param_reg,
        .v4 = param_reg,
        .v5 = param_reg,
        .v6 = param_reg,
        .v7 = param_reg,
        .v8 = .free,
        .v9 = .free,
        .v10 = .free,
        .v11 = .free,
        .v12 = .free,
        .v13 = .free,
        .v14 = .free,
        .v15 = .free,
        .v16 = callee_clobbered_reg,
        .v17 = callee_clobbered_reg,
        .v18 = callee_clobbered_reg,
        .v19 = callee_clobbered_reg,
        .v20 = callee_clobbered_reg,
        .v21 = callee_clobbered_reg,
        .v22 = callee_clobbered_reg,
        .v23 = callee_clobbered_reg,
        .v24 = callee_clobbered_reg,
        .v25 = callee_clobbered_reg,
        .v26 = callee_clobbered_reg,
        .v27 = callee_clobbered_reg,
        .v28 = callee_clobbered_reg,
        .v29 = callee_clobbered_reg,
        .v30 = callee_clobbered_reg,
        .v31 = callee_clobbered_reg,

        .fpcr = .free,
        .fpsr = .free,

        .p0 = callee_clobbered_reg,
        .p1 = callee_clobbered_reg,
        .p2 = callee_clobbered_reg,
        .p3 = callee_clobbered_reg,
        .p4 = callee_clobbered_reg,
        .p5 = callee_clobbered_reg,
        .p6 = callee_clobbered_reg,
        .p7 = callee_clobbered_reg,
        .p8 = callee_clobbered_reg,
        .p9 = callee_clobbered_reg,
        .p10 = callee_clobbered_reg,
        .p11 = callee_clobbered_reg,
        .p12 = callee_clobbered_reg,
        .p13 = callee_clobbered_reg,
        .p14 = callee_clobbered_reg,
        .p15 = callee_clobbered_reg,

        .ffr = .free,
    });
    /// Reserves every currently-free caller-saved register before lowering
    /// the call's results.
    fn prepareReturn(isel: *Select) !void {
        var live_reg_it = isel.live_registers.iterator();
        while (live_reg_it.next()) |live_reg_entry| switch (caller_saved_regs.get(live_reg_entry.key)) {
            else => unreachable,
            param_reg, callee_clobbered_reg => switch (live_reg_entry.value.*) {
                _ => {},
                .allocating => unreachable,
                .free => live_reg_entry.value.* = .allocating,
            },
            .free => {},
        };
    }
    /// Vacates `ra` and leaves it reserved for a call result.
    fn returnFill(isel: *Select, ra: Register.Alias) !void {
        const live_vi = isel.live_registers.getPtr(ra);
        if (try isel.fill(ra)) {
            assert(live_vi.* == .free);
            live_vi.* = .allocating;
        }
        assert(live_vi.* == .allocating);
    }
    /// Binds `vi`'s definition to result register `ra`.
    fn returnLiveIn(isel: *Select, vi: Value.Index, ra: Register.Alias) !void {
        try vi.defLiveIn(isel, ra, &caller_saved_regs);
    }
    /// Clears remaining live values out of caller-saved registers. 16-byte
    /// values are evicted to memory even from otherwise-untouched vector
    /// registers, since only the low 64 bits of v8-v15 survive a call.
    fn finishReturn(isel: *Select) !void {
        var live_reg_it = isel.live_registers.iterator();
        while (live_reg_it.next()) |live_reg_entry| {
            switch (live_reg_entry.value.*) {
                _ => |live_vi| switch (live_vi.size(isel)) {
                    else => unreachable,
                    1, 2, 4, 8 => {},
                    16 => {
                        assert(try isel.fillMemory(live_reg_entry.key));
                        assert(live_reg_entry.value.* == .free);
                        switch (caller_saved_regs.get(live_reg_entry.key)) {
                            else => unreachable,
                            param_reg, callee_clobbered_reg => live_reg_entry.value.* = .allocating,
                            .free => {},
                        }
                        continue;
                    },
                },
                .allocating, .free => {},
            }
            switch (caller_saved_regs.get(live_reg_entry.key)) {
                else => unreachable,
                param_reg, callee_clobbered_reg => switch (live_reg_entry.value.*) {
                    _ => {
                        assert(try isel.fill(live_reg_entry.key));
                        assert(live_reg_entry.value.* == .free);
                        live_reg_entry.value.* = .allocating;
                    },
                    .allocating => {},
                    .free => unreachable,
                },
                .free => {},
            }
        }
    }
    /// Releases the callee-clobbered reservations; parameter registers must
    /// remain reserved for the arguments.
    fn prepareCallee(isel: *Select) !void {
        var live_reg_it = isel.live_registers.iterator();
        while (live_reg_it.next()) |live_reg_entry| switch (caller_saved_regs.get(live_reg_entry.key)) {
            else => unreachable,
            param_reg => assert(live_reg_entry.value.* == .allocating),
            callee_clobbered_reg => isel.freeReg(live_reg_entry.key),
            .free => {},
        };
    }
    // No work is needed between the callee and the parameters.
    fn finishCallee(_: *Select) !void {}
    fn prepareParams(_: *Select) !void {}
    /// Records that argument `vi` is passed in register `ra`, keeping the
    /// register reserved afterwards.
    fn paramLiveOut(isel: *Select, vi: Value.Index, ra: Register.Alias) !void {
        isel.freeReg(ra);
        try vi.liveOut(isel, ra);
        const live_vi = isel.live_registers.getPtr(ra);
        if (live_vi.* == .free) live_vi.* = .allocating;
    }
    /// Records that register `ra` holds the address of argument `vi`, keeping
    /// the register reserved afterwards.
    fn paramAddress(isel: *Select, vi: Value.Index, ra: Register.Alias) !void {
        isel.freeReg(ra);
        try vi.address(isel, 0, ra);
        const live_vi = isel.live_registers.getPtr(ra);
        if (live_vi.* == .free) live_vi.* = .allocating;
    }
    /// Releases the remaining parameter-register reservations.
    fn finishParams(isel: *Select) !void {
        var live_reg_it = isel.live_registers.iterator();
        while (live_reg_it.next()) |live_reg_entry| switch (caller_saved_regs.get(live_reg_entry.key)) {
            else => unreachable,
            param_reg => switch (live_reg_entry.value.*) {
                _ => {},
                .allocating => live_reg_entry.value.* = .free,
                .free => unreachable,
            },
            callee_clobbered_reg, .free => {},
        };
    }
};
+
+pub const CallAbiIterator = struct {
+ /// Next General-purpose Register Number
+ ngrn: Register.Alias,
+ /// Next SIMD and Floating-point Register Number
+ nsrn: Register.Alias,
+ /// next stacked argument address
+ nsaa: u24,
+
+ pub const ngrn_start: Register.Alias = .r0;
+ pub const ngrn_end: Register.Alias = .r8;
+ pub const nsrn_start: Register.Alias = .v0;
+ pub const nsrn_end: Register.Alias = .v8;
+ pub const nsaa_start: u42 = 0;
+
+ pub const init: CallAbiIterator = .{
+ // A.1
+ .ngrn = ngrn_start,
+ // A.2
+ .nsrn = nsrn_start,
+ // A.3
+ .nsaa = nsaa_start,
+ };
+
+ pub fn param(it: *CallAbiIterator, isel: *Select, ty: ZigType) !?Value.Index {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+
+ if (ty.isNoReturn(zcu) or !ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
+ try isel.values.ensureUnusedCapacity(zcu.gpa, Value.max_parts);
+ const wip_vi = isel.initValue(ty);
+ type_key: switch (ip.indexToKey(ty.toIntern())) {
+ else => return isel.fail("CallAbiIterator.param({f})", .{isel.fmtType(ty)}),
+ .int_type => |int_type| switch (int_type.bits) {
+ 0 => unreachable,
+ 1...16 => {
+ wip_vi.setSignedness(isel, int_type.signedness);
+ // C.7
+ it.integer(isel, wip_vi);
+ },
+ // C.7
+ 17...64 => it.integer(isel, wip_vi),
+ // C.9
+ 65...128 => it.integers(isel, wip_vi, @splat(@divExact(wip_vi.size(isel), 2))),
+ else => it.indirect(isel, wip_vi),
+ },
+ .array_type => switch (wip_vi.size(isel)) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => |size| it.integers(isel, wip_vi, .{ 8, size - 8 }),
+ else => it.indirect(isel, wip_vi),
+ },
+ .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+ .one, .many, .c => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 64,
+ } },
+ .slice => it.integers(isel, wip_vi, @splat(8)),
+ },
+ .opt_type => |child_type| if (ty.optionalReprIsPayload(zcu))
+ continue :type_key ip.indexToKey(child_type)
+ else switch (ZigType.fromInterned(child_type).abiSize(zcu)) {
+ 0 => continue :type_key .{ .simple_type = .bool },
+ 1...7 => it.integer(isel, wip_vi),
+ 8...15 => |child_size| it.integers(isel, wip_vi, .{ 8, child_size - 7 }),
+ else => return isel.fail("CallAbiIterator.param({f})", .{isel.fmtType(ty)}),
+ },
+ .anyframe_type => unreachable,
+ .error_union_type => |error_union_type| switch (wip_vi.size(isel)) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => {
+ var sizes: [2]u64 = @splat(0);
+ const payload_ty: ZigType = .fromInterned(error_union_type.payload_type);
+ {
+ const error_set_ty: ZigType = .fromInterned(error_union_type.error_set_type);
+ const offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const end = offset % 8 + error_set_ty.abiSize(zcu);
+ const part_index: usize = @intCast(offset / 8);
+ sizes[part_index] = @max(sizes[part_index], @min(end, 8));
+ if (end > 8) sizes[part_index + 1] = @max(sizes[part_index + 1], end - 8);
+ }
+ {
+ const offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const end = offset % 8 + payload_ty.abiSize(zcu);
+ const part_index: usize = @intCast(offset / 8);
+ sizes[part_index] = @max(sizes[part_index], @min(end, 8));
+ if (end > 8) sizes[part_index + 1] = @max(sizes[part_index + 1], end - 8);
+ }
+ it.integers(isel, wip_vi, sizes);
+ },
+ else => it.indirect(isel, wip_vi),
+ },
+ .simple_type => |simple_type| switch (simple_type) {
+ .f16, .f32, .f64, .f128, .c_longdouble => it.vector(isel, wip_vi),
+ .f80 => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 80 } },
+ .usize,
+ .isize,
+ .c_char,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ => continue :type_key .{ .int_type = ty.intInfo(zcu) },
+ // B.1
+ .anyopaque => it.indirect(isel, wip_vi),
+ .bool => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 1 } },
+ .anyerror => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = zcu.errorSetBits(),
+ } },
+ .void,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .noreturn,
+ .null,
+ .undefined,
+ .enum_literal,
+ .adhoc_inferred_error_set,
+ .generic_poison,
+ => unreachable,
+ },
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(ty.toIntern());
+ switch (loaded_struct.layout) {
+ .auto, .@"extern" => {},
+ .@"packed" => continue :type_key .{
+ .int_type = ip.indexToKey(loaded_struct.backingIntTypeUnordered(ip)).int_type,
+ },
+ }
+ const size = wip_vi.size(isel);
+ if (size <= 16 * 4) homogeneous_aggregate: {
+ const fdt = homogeneousStructBaseType(zcu, &loaded_struct) orelse break :homogeneous_aggregate;
+ const parts_len = @shrExact(size, fdt.log2Size());
+ if (parts_len > 4) break :homogeneous_aggregate;
+ it.vectors(isel, wip_vi, fdt, @intCast(parts_len));
+ break :type_key;
+ }
+ switch (size) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => {
+ var part_offset: u64 = 0;
+ var part_sizes: [2]u64 = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var next_field_end: u64 = 0;
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (part_offset < size) {
+ const field_end = next_field_end;
+ const next_field_begin = if (field_it.next()) |field_index| next_field_begin: {
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ const next_field_begin = switch (loaded_struct.fieldAlign(ip, field_index)) {
+ .none => field_ty.abiAlignment(zcu),
+ else => |field_align| field_align,
+ }.forward(field_end);
+ next_field_end = next_field_begin + field_ty.abiSize(zcu);
+ break :next_field_begin next_field_begin;
+ } else std.mem.alignForward(u64, size, 8);
+ while (next_field_begin - part_offset >= 8) {
+ const part_size = field_end - part_offset;
+ part_sizes[parts_len] = part_size;
+ assert(part_offset + part_size <= size);
+ parts_len += 1;
+ part_offset = next_field_begin;
+ }
+ }
+ assert(parts_len == part_sizes.len);
+ it.integers(isel, wip_vi, part_sizes);
+ },
+ else => it.indirect(isel, wip_vi),
+ }
+ },
+ .tuple_type => |tuple_type| {
+ const size = wip_vi.size(isel);
+ if (size <= 16 * 4) homogeneous_aggregate: {
+ const fdt = homogeneousTupleBaseType(zcu, tuple_type) orelse break :homogeneous_aggregate;
+ const parts_len = @shrExact(size, fdt.log2Size());
+ if (parts_len > 4) break :homogeneous_aggregate;
+ it.vectors(isel, wip_vi, fdt, @intCast(parts_len));
+ break :type_key;
+ }
+ switch (size) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => {
+ var part_offset: u64 = 0;
+ var part_sizes: [2]u64 = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var next_field_end: u64 = 0;
+ var field_index: usize = 0;
+ while (part_offset < size) {
+ const field_end = next_field_end;
+ const next_field_begin = while (field_index < tuple_type.types.len) {
+ defer field_index += 1;
+ if (tuple_type.values.get(ip)[field_index] != .none) continue;
+ const field_ty: ZigType = .fromInterned(tuple_type.types.get(ip)[field_index]);
+ const next_field_begin = field_ty.abiAlignment(zcu).forward(field_end);
+ next_field_end = next_field_begin + field_ty.abiSize(zcu);
+ break next_field_begin;
+ } else std.mem.alignForward(u64, size, 8);
+ while (next_field_begin - part_offset >= 8) {
+ const part_size = @min(field_end - part_offset, 8);
+ part_sizes[parts_len] = part_size;
+ assert(part_offset + part_size <= size);
+ parts_len += 1;
+ part_offset += part_size;
+ if (part_offset >= field_end) part_offset = next_field_begin;
+ }
+ }
+ assert(parts_len == part_sizes.len);
+ it.integers(isel, wip_vi, part_sizes);
+ },
+ else => it.indirect(isel, wip_vi),
+ }
+ },
+ .union_type => {
+ const loaded_union = ip.loadUnionType(ty.toIntern());
+ switch (loaded_union.flagsUnordered(ip).layout) {
+ .auto, .@"extern" => {},
+ .@"packed" => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = @intCast(ty.bitSize(zcu)),
+ } },
+ }
+ switch (wip_vi.size(isel)) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => {
+ const union_layout = ZigType.getUnionLayout(loaded_union, zcu);
+ var sizes: [2]u64 = @splat(0);
+ {
+ const offset = union_layout.tagOffset();
+ const end = offset % 8 + union_layout.tag_size;
+ const part_index: usize = @intCast(offset / 8);
+ sizes[part_index] = @max(sizes[part_index], @min(end, 8));
+ if (end > 8) sizes[part_index + 1] = @max(sizes[part_index + 1], end - 8);
+ }
+ {
+ const offset = union_layout.payloadOffset();
+ const end = offset % 8 + union_layout.payload_size;
+ const part_index: usize = @intCast(offset / 8);
+ sizes[part_index] = @max(sizes[part_index], @min(end, 8));
+ if (end > 8) sizes[part_index + 1] = @max(sizes[part_index + 1], end - 8);
+ }
+ it.integers(isel, wip_vi, sizes);
+ },
+ else => it.indirect(isel, wip_vi),
+ }
+ },
+ .opaque_type, .func_type => continue :type_key .{ .simple_type = .anyopaque },
+ .enum_type => continue :type_key ip.indexToKey(ip.loadEnumType(ty.toIntern()).tag_ty),
+ .error_set_type,
+ .inferred_error_set_type,
+ => continue :type_key .{ .simple_type = .anyerror },
+ .undef,
+ .simple_value,
+ .variable,
+ .@"extern",
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .empty_enum_value,
+ .float,
+ .ptr,
+ .slice,
+ .opt,
+ .aggregate,
+ .un,
+ .memoized_call,
+ => unreachable, // values, not types
+ }
+ return wip_vi.ref(isel);
+ }
+
+ pub fn nonSysvVarArg(it: *CallAbiIterator, isel: *Select, ty: ZigType) !?Value.Index {
+ const ngrn = it.ngrn;
+ defer it.ngrn = ngrn;
+ it.ngrn = ngrn_end;
+ const nsrn = it.nsrn;
+ defer it.nsrn = nsrn;
+ it.nsrn = nsrn_end;
+ return it.param(isel, ty);
+ }
+
+ pub fn ret(it: *CallAbiIterator, isel: *Select, ty: ZigType) !?Value.Index {
+ const wip_vi = try it.param(isel, ty) orelse return null;
+ switch (wip_vi.parent(isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ assert(address_vi.hint(isel) == ngrn_start);
+ address_vi.setHint(isel, ngrn_end);
+ },
+ }
+ return wip_vi;
+ }
+
+ pub const FundamentalDataType = enum {
+ half,
+ single,
+ double,
+ quad,
+ vector64,
+ vector128,
+ fn log2Size(fdt: FundamentalDataType) u3 {
+ return switch (fdt) {
+ .half => 1,
+ .single => 2,
+ .double, .vector64 => 3,
+ .quad, .vector128 => 4,
+ };
+ }
+ fn size(fdt: FundamentalDataType) u64 {
+ return @as(u64, 1) << fdt.log2Size();
+ }
+ };
+ fn homogeneousAggregateBaseType(zcu: *Zcu, initial_ty: InternPool.Index) ?FundamentalDataType {
+ const ip = &zcu.intern_pool;
+ var ty = initial_ty;
+ return type_key: switch (ip.indexToKey(ty)) {
+ else => null,
+ .array_type => |array_type| {
+ ty = array_type.child;
+ continue :type_key ip.indexToKey(ty);
+ },
+ .vector_type => switch (ZigType.fromInterned(ty).abiSize(zcu)) {
+ else => null,
+ 8 => .vector64,
+ 16 => .vector128,
+ },
+ .simple_type => |simple_type| switch (simple_type) {
+ .f16 => .half,
+ .f32 => .single,
+ .f64 => .double,
+ .f128 => .quad,
+ .c_longdouble => switch (zcu.getTarget().cTypeBitSize(.longdouble)) {
+ else => unreachable,
+ 16 => .half,
+ 32 => .single,
+ 64 => .double,
+ 80 => null,
+ 128 => .quad,
+ },
+ else => null,
+ },
+ .struct_type => homogeneousStructBaseType(zcu, &ip.loadStructType(ty)),
+ .tuple_type => |tuple_type| homogeneousTupleBaseType(zcu, tuple_type),
+ };
+ }
+ fn homogeneousStructBaseType(zcu: *Zcu, loaded_struct: *const InternPool.LoadedStructType) ?FundamentalDataType {
+ const ip = &zcu.intern_pool;
+ var common_fdt: ?FundamentalDataType = null;
+ for (0.., loaded_struct.field_types.get(ip)) |field_index, field_ty| {
+ if (loaded_struct.fieldIsComptime(ip, field_index)) continue;
+ if (loaded_struct.fieldAlign(ip, field_index) != .none) return null;
+ if (!ZigType.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
+ const fdt = homogeneousAggregateBaseType(zcu, field_ty);
+ if (common_fdt == null) common_fdt = fdt else if (fdt != common_fdt) return null;
+ }
+ return common_fdt;
+ }
+ fn homogeneousTupleBaseType(zcu: *Zcu, tuple_type: InternPool.Key.TupleType) ?FundamentalDataType {
+ const ip = &zcu.intern_pool;
+ var common_fdt: ?FundamentalDataType = null;
+ for (tuple_type.values.get(ip), tuple_type.types.get(ip)) |field_val, field_ty| {
+ if (field_val != .none) continue;
+ const fdt = homogeneousAggregateBaseType(zcu, field_ty);
+ if (common_fdt == null) common_fdt = fdt else if (fdt != common_fdt) return null;
+ }
+ return common_fdt;
+ }
+
+ const Spec = struct {
+ offset: u64,
+ size: u64,
+ };
+
+ fn stack(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ // C.12
+ it.nsaa = @intCast(wip_vi.alignment(isel).forward(it.nsaa));
+ const parent_vi = switch (wip_vi.parent(isel)) {
+ .unallocated, .stack_slot => wip_vi,
+ .address, .constant => unreachable,
+ .value => |parent_vi| parent_vi,
+ };
+ switch (parent_vi.parent(isel)) {
+ .unallocated => parent_vi.setParent(isel, .{ .stack_slot = .{
+ .base = .sp,
+ .offset = it.nsaa,
+ } }),
+ .stack_slot => {},
+ .address, .value, .constant => unreachable,
+ }
+ it.nsaa += @intCast(wip_vi.size(isel));
+ }
+
+ fn integer(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ assert(wip_vi.size(isel) <= 8);
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ if (it.ngrn == ngrn_end) return it.stack(isel, wip_vi);
+ wip_vi.setHint(isel, it.ngrn);
+ it.ngrn = @enumFromInt(@intFromEnum(it.ngrn) + 1);
+ }
+
+ fn integers(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index, part_sizes: [2]u64) void {
+ assert(wip_vi.size(isel) <= 16);
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ // C.8
+ if (natural_alignment == .@"16") it.ngrn = @enumFromInt(std.mem.alignForward(
+ @typeInfo(Register.Alias).@"enum".tag_type,
+ @intFromEnum(it.ngrn),
+ 2,
+ ));
+ if (it.ngrn == ngrn_end) return it.stack(isel, wip_vi);
+ wip_vi.setParts(isel, part_sizes.len);
+ for (0.., part_sizes) |part_index, part_size|
+ it.integer(isel, wip_vi.addPart(isel, 8 * part_index, part_size));
+ }
+
+ fn vector(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ assert(wip_vi.size(isel) <= 16);
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ wip_vi.setIsVector(isel);
+ if (it.nsrn == nsrn_end) return it.stack(isel, wip_vi);
+ wip_vi.setHint(isel, it.nsrn);
+ it.nsrn = @enumFromInt(@intFromEnum(it.nsrn) + 1);
+ }
+
+ fn vectors(
+ it: *CallAbiIterator,
+ isel: *Select,
+ wip_vi: Value.Index,
+ fdt: FundamentalDataType,
+ parts_len: Value.PartsLen,
+ ) void {
+ const fdt_log2_size = fdt.log2Size();
+ assert(wip_vi.size(isel) == @shlExact(@as(u9, parts_len), fdt_log2_size));
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ if (@intFromEnum(it.nsrn) > @intFromEnum(nsrn_end) - parts_len) return it.stack(isel, wip_vi);
+ if (parts_len == 1) return it.vector(isel, wip_vi);
+ wip_vi.setParts(isel, parts_len);
+ const fdt_size = @as(u64, 1) << fdt_log2_size;
+ for (0..parts_len) |part_index|
+ it.vector(isel, wip_vi.addPart(isel, part_index << fdt_log2_size, fdt_size));
+ }
+
+ fn indirect(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ const wip_address_vi = isel.initValue(.usize);
+ wip_vi.setParent(isel, .{ .address = wip_address_vi });
+ it.integer(isel, wip_address_vi);
+ }
+};
+
+const Air = @import("../../Air.zig");
+const assert = std.debug.assert;
+const codegen = @import("../../codegen.zig");
+const Constant = @import("../../Value.zig");
+const InternPool = @import("../../InternPool.zig");
+const Package = @import("../../Package.zig");
+const Register = codegen.aarch64.encoding.Register;
+const Select = @This();
+const std = @import("std");
+const tracking_log = std.log.scoped(.tracking);
+const wip_mir_log = std.log.scoped(.@"wip-mir");
+const Zcu = @import("../../Zcu.zig");
+const ZigType = @import("../../Type.zig");
diff --git a/src/codegen/aarch64/abi.zig b/src/codegen/aarch64/abi.zig
index 0cd0b389b1..9587415287 100644
--- a/src/codegen/aarch64/abi.zig
+++ b/src/codegen/aarch64/abi.zig
@@ -1,7 +1,5 @@
+const assert = @import("std").debug.assert;
const std = @import("std");
-const builtin = @import("builtin");
-const bits = @import("../../arch/aarch64/bits.zig");
-const Register = bits.Register;
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
@@ -15,7 +13,7 @@ pub const Class = union(enum) {
/// For `float_array` the second element will be the amount of floats.
pub fn classifyType(ty: Type, zcu: *Zcu) Class {
- std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
+ assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag(zcu)) {
@@ -47,11 +45,11 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
return .byval;
},
.optional => {
- std.debug.assert(ty.isPtrLikeOptional(zcu));
+ assert(ty.isPtrLikeOptional(zcu));
return .byval;
},
.pointer => {
- std.debug.assert(!ty.isSlice(zcu));
+ assert(!ty.isSlice(zcu));
return .byval;
},
.error_union,
@@ -138,13 +136,3 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
else => return null,
}
}
-
-pub const callee_preserved_regs = [_]Register{
- .x19, .x20, .x21, .x22, .x23,
- .x24, .x25, .x26, .x27, .x28,
-};
-
-pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
-pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
-
-const allocatable_registers = callee_preserved_regs;
diff --git a/src/codegen/aarch64/encoding.zig b/src/codegen/aarch64/encoding.zig
new file mode 100644
index 0000000000..6ddf46b625
--- /dev/null
+++ b/src/codegen/aarch64/encoding.zig
@@ -0,0 +1,12194 @@
+/// B1.2 Registers in AArch64 Execution state
+pub const Register = struct {
+ alias: Alias,
+ format: Format,
+
+ pub const Format = union(enum) {
+ alias,
+ integer: IntegerSize,
+ scalar: VectorSize,
+ vector: Arrangement,
+ element: struct { size: VectorSize, index: u4 },
+ };
+
+ pub const IntegerSize = enum(u1) {
+ word = 0b0,
+ doubleword = 0b1,
+
+ pub fn prefix(is: IntegerSize) u8 {
+ return (comptime std.enums.EnumArray(IntegerSize, u8).init(.{
+ .word = 'w',
+ .doubleword = 'x',
+ })).get(is);
+ }
+ };
+
+ pub const VectorSize = enum(u3) {
+ byte = 0,
+ half = 1,
+ single = 2,
+ double = 3,
+ quad = 4,
+ scalable,
+ predicate,
+
+ pub fn prefix(vs: VectorSize) u8 {
+ return (comptime std.enums.EnumArray(VectorSize, u8).init(.{
+ .byte = 'b',
+ .half = 'h',
+ .single = 's',
+ .double = 'd',
+ .quad = 'q',
+ .scalable = 'z',
+ .predicate = 'p',
+ })).get(vs);
+ }
+ };
+
+ pub const Arrangement = enum {
+ @"2d",
+ @"4s",
+ @"8h",
+ @"16b",
+
+ @"1d",
+ @"2s",
+ @"4h",
+ @"8b",
+
+ pub fn len(arrangement: Arrangement) u5 {
+ return switch (arrangement) {
+ .@"1d" => 1,
+ .@"2d", .@"2s" => 2,
+ .@"4s", .@"4h" => 4,
+ .@"8h", .@"8b" => 8,
+ .@"16b" => 16,
+ };
+ }
+
+ pub fn size(arrangement: Arrangement) Instruction.DataProcessingVector.Q {
+ return switch (arrangement) {
+ .@"2d", .@"4s", .@"8h", .@"16b" => .quad,
+ .@"1d", .@"2s", .@"4h", .@"8b" => .double,
+ };
+ }
+
+ pub fn elemSize(arrangement: Arrangement) Instruction.DataProcessingVector.Size {
+ return switch (arrangement) {
+ .@"2d", .@"1d" => .double,
+ .@"4s", .@"2s" => .single,
+ .@"8h", .@"4h" => .half,
+ .@"16b", .@"8b" => .byte,
+ };
+ }
+ };
+
+ pub const x0: Register = .{ .alias = .r0, .format = .{ .integer = .doubleword } };
+ pub const x1: Register = .{ .alias = .r1, .format = .{ .integer = .doubleword } };
+ pub const x2: Register = .{ .alias = .r2, .format = .{ .integer = .doubleword } };
+ pub const x3: Register = .{ .alias = .r3, .format = .{ .integer = .doubleword } };
+ pub const x4: Register = .{ .alias = .r4, .format = .{ .integer = .doubleword } };
+ pub const x5: Register = .{ .alias = .r5, .format = .{ .integer = .doubleword } };
+ pub const x6: Register = .{ .alias = .r6, .format = .{ .integer = .doubleword } };
+ pub const x7: Register = .{ .alias = .r7, .format = .{ .integer = .doubleword } };
+ pub const x8: Register = .{ .alias = .r8, .format = .{ .integer = .doubleword } };
+ pub const x9: Register = .{ .alias = .r9, .format = .{ .integer = .doubleword } };
+ pub const x10: Register = .{ .alias = .r10, .format = .{ .integer = .doubleword } };
+ pub const x11: Register = .{ .alias = .r11, .format = .{ .integer = .doubleword } };
+ pub const x12: Register = .{ .alias = .r12, .format = .{ .integer = .doubleword } };
+ pub const x13: Register = .{ .alias = .r13, .format = .{ .integer = .doubleword } };
+ pub const x14: Register = .{ .alias = .r14, .format = .{ .integer = .doubleword } };
+ pub const x15: Register = .{ .alias = .r15, .format = .{ .integer = .doubleword } };
+ pub const x16: Register = .{ .alias = .r16, .format = .{ .integer = .doubleword } };
+ pub const x17: Register = .{ .alias = .r17, .format = .{ .integer = .doubleword } };
+ pub const x18: Register = .{ .alias = .r18, .format = .{ .integer = .doubleword } };
+ pub const x19: Register = .{ .alias = .r19, .format = .{ .integer = .doubleword } };
+ pub const x20: Register = .{ .alias = .r20, .format = .{ .integer = .doubleword } };
+ pub const x21: Register = .{ .alias = .r21, .format = .{ .integer = .doubleword } };
+ pub const x22: Register = .{ .alias = .r22, .format = .{ .integer = .doubleword } };
+ pub const x23: Register = .{ .alias = .r23, .format = .{ .integer = .doubleword } };
+ pub const x24: Register = .{ .alias = .r24, .format = .{ .integer = .doubleword } };
+ pub const x25: Register = .{ .alias = .r25, .format = .{ .integer = .doubleword } };
+ pub const x26: Register = .{ .alias = .r26, .format = .{ .integer = .doubleword } };
+ pub const x27: Register = .{ .alias = .r27, .format = .{ .integer = .doubleword } };
+ pub const x28: Register = .{ .alias = .r28, .format = .{ .integer = .doubleword } };
+ pub const x29: Register = .{ .alias = .r29, .format = .{ .integer = .doubleword } };
+ pub const x30: Register = .{ .alias = .r30, .format = .{ .integer = .doubleword } };
+ pub const xzr: Register = .{ .alias = .zr, .format = .{ .integer = .doubleword } };
+ pub const sp: Register = .{ .alias = .sp, .format = .{ .integer = .doubleword } };
+
+ pub const w0: Register = .{ .alias = .r0, .format = .{ .integer = .word } };
+ pub const w1: Register = .{ .alias = .r1, .format = .{ .integer = .word } };
+ pub const w2: Register = .{ .alias = .r2, .format = .{ .integer = .word } };
+ pub const w3: Register = .{ .alias = .r3, .format = .{ .integer = .word } };
+ pub const w4: Register = .{ .alias = .r4, .format = .{ .integer = .word } };
+ pub const w5: Register = .{ .alias = .r5, .format = .{ .integer = .word } };
+ pub const w6: Register = .{ .alias = .r6, .format = .{ .integer = .word } };
+ pub const w7: Register = .{ .alias = .r7, .format = .{ .integer = .word } };
+ pub const w8: Register = .{ .alias = .r8, .format = .{ .integer = .word } };
+ pub const w9: Register = .{ .alias = .r9, .format = .{ .integer = .word } };
+ pub const w10: Register = .{ .alias = .r10, .format = .{ .integer = .word } };
+ pub const w11: Register = .{ .alias = .r11, .format = .{ .integer = .word } };
+ pub const w12: Register = .{ .alias = .r12, .format = .{ .integer = .word } };
+ pub const w13: Register = .{ .alias = .r13, .format = .{ .integer = .word } };
+ pub const w14: Register = .{ .alias = .r14, .format = .{ .integer = .word } };
+ pub const w15: Register = .{ .alias = .r15, .format = .{ .integer = .word } };
+ pub const w16: Register = .{ .alias = .r16, .format = .{ .integer = .word } };
+ pub const w17: Register = .{ .alias = .r17, .format = .{ .integer = .word } };
+ pub const w18: Register = .{ .alias = .r18, .format = .{ .integer = .word } };
+ pub const w19: Register = .{ .alias = .r19, .format = .{ .integer = .word } };
+ pub const w20: Register = .{ .alias = .r20, .format = .{ .integer = .word } };
+ pub const w21: Register = .{ .alias = .r21, .format = .{ .integer = .word } };
+ pub const w22: Register = .{ .alias = .r22, .format = .{ .integer = .word } };
+ pub const w23: Register = .{ .alias = .r23, .format = .{ .integer = .word } };
+ pub const w24: Register = .{ .alias = .r24, .format = .{ .integer = .word } };
+ pub const w25: Register = .{ .alias = .r25, .format = .{ .integer = .word } };
+ pub const w26: Register = .{ .alias = .r26, .format = .{ .integer = .word } };
+ pub const w27: Register = .{ .alias = .r27, .format = .{ .integer = .word } };
+ pub const w28: Register = .{ .alias = .r28, .format = .{ .integer = .word } };
+ pub const w29: Register = .{ .alias = .r29, .format = .{ .integer = .word } };
+ pub const w30: Register = .{ .alias = .r30, .format = .{ .integer = .word } };
+ pub const wzr: Register = .{ .alias = .zr, .format = .{ .integer = .word } };
+ pub const wsp: Register = .{ .alias = .sp, .format = .{ .integer = .word } };
+
+ pub const ip = x16;
+ pub const ip0 = x16;
+ pub const ip1 = x17;
+ pub const fp = x29;
+ pub const lr = x30;
+ pub const pc: Register = .{ .alias = .pc, .format = .{ .integer = .doubleword } };
+
+ pub const q0: Register = .{ .alias = .v0, .format = .{ .scalar = .quad } };
+ pub const q1: Register = .{ .alias = .v1, .format = .{ .scalar = .quad } };
+ pub const q2: Register = .{ .alias = .v2, .format = .{ .scalar = .quad } };
+ pub const q3: Register = .{ .alias = .v3, .format = .{ .scalar = .quad } };
+ pub const q4: Register = .{ .alias = .v4, .format = .{ .scalar = .quad } };
+ pub const q5: Register = .{ .alias = .v5, .format = .{ .scalar = .quad } };
+ pub const q6: Register = .{ .alias = .v6, .format = .{ .scalar = .quad } };
+ pub const q7: Register = .{ .alias = .v7, .format = .{ .scalar = .quad } };
+ pub const q8: Register = .{ .alias = .v8, .format = .{ .scalar = .quad } };
+ pub const q9: Register = .{ .alias = .v9, .format = .{ .scalar = .quad } };
+ pub const q10: Register = .{ .alias = .v10, .format = .{ .scalar = .quad } };
+ pub const q11: Register = .{ .alias = .v11, .format = .{ .scalar = .quad } };
+ pub const q12: Register = .{ .alias = .v12, .format = .{ .scalar = .quad } };
+ pub const q13: Register = .{ .alias = .v13, .format = .{ .scalar = .quad } };
+ pub const q14: Register = .{ .alias = .v14, .format = .{ .scalar = .quad } };
+ pub const q15: Register = .{ .alias = .v15, .format = .{ .scalar = .quad } };
+ pub const q16: Register = .{ .alias = .v16, .format = .{ .scalar = .quad } };
+ pub const q17: Register = .{ .alias = .v17, .format = .{ .scalar = .quad } };
+ pub const q18: Register = .{ .alias = .v18, .format = .{ .scalar = .quad } };
+ pub const q19: Register = .{ .alias = .v19, .format = .{ .scalar = .quad } };
+ pub const q20: Register = .{ .alias = .v20, .format = .{ .scalar = .quad } };
+ pub const q21: Register = .{ .alias = .v21, .format = .{ .scalar = .quad } };
+ pub const q22: Register = .{ .alias = .v22, .format = .{ .scalar = .quad } };
+ pub const q23: Register = .{ .alias = .v23, .format = .{ .scalar = .quad } };
+ pub const q24: Register = .{ .alias = .v24, .format = .{ .scalar = .quad } };
+ pub const q25: Register = .{ .alias = .v25, .format = .{ .scalar = .quad } };
+ pub const q26: Register = .{ .alias = .v26, .format = .{ .scalar = .quad } };
+ pub const q27: Register = .{ .alias = .v27, .format = .{ .scalar = .quad } };
+ pub const q28: Register = .{ .alias = .v28, .format = .{ .scalar = .quad } };
+ pub const q29: Register = .{ .alias = .v29, .format = .{ .scalar = .quad } };
+ pub const q30: Register = .{ .alias = .v30, .format = .{ .scalar = .quad } };
+ pub const q31: Register = .{ .alias = .v31, .format = .{ .scalar = .quad } };
+
+ pub const d0: Register = .{ .alias = .v0, .format = .{ .scalar = .double } };
+ pub const d1: Register = .{ .alias = .v1, .format = .{ .scalar = .double } };
+ pub const d2: Register = .{ .alias = .v2, .format = .{ .scalar = .double } };
+ pub const d3: Register = .{ .alias = .v3, .format = .{ .scalar = .double } };
+ pub const d4: Register = .{ .alias = .v4, .format = .{ .scalar = .double } };
+ pub const d5: Register = .{ .alias = .v5, .format = .{ .scalar = .double } };
+ pub const d6: Register = .{ .alias = .v6, .format = .{ .scalar = .double } };
+ pub const d7: Register = .{ .alias = .v7, .format = .{ .scalar = .double } };
+ pub const d8: Register = .{ .alias = .v8, .format = .{ .scalar = .double } };
+ pub const d9: Register = .{ .alias = .v9, .format = .{ .scalar = .double } };
+ pub const d10: Register = .{ .alias = .v10, .format = .{ .scalar = .double } };
+ pub const d11: Register = .{ .alias = .v11, .format = .{ .scalar = .double } };
+ pub const d12: Register = .{ .alias = .v12, .format = .{ .scalar = .double } };
+ pub const d13: Register = .{ .alias = .v13, .format = .{ .scalar = .double } };
+ pub const d14: Register = .{ .alias = .v14, .format = .{ .scalar = .double } };
+ pub const d15: Register = .{ .alias = .v15, .format = .{ .scalar = .double } };
+ pub const d16: Register = .{ .alias = .v16, .format = .{ .scalar = .double } };
+ pub const d17: Register = .{ .alias = .v17, .format = .{ .scalar = .double } };
+ pub const d18: Register = .{ .alias = .v18, .format = .{ .scalar = .double } };
+ pub const d19: Register = .{ .alias = .v19, .format = .{ .scalar = .double } };
+ pub const d20: Register = .{ .alias = .v20, .format = .{ .scalar = .double } };
+ pub const d21: Register = .{ .alias = .v21, .format = .{ .scalar = .double } };
+ pub const d22: Register = .{ .alias = .v22, .format = .{ .scalar = .double } };
+ pub const d23: Register = .{ .alias = .v23, .format = .{ .scalar = .double } };
+ pub const d24: Register = .{ .alias = .v24, .format = .{ .scalar = .double } };
+ pub const d25: Register = .{ .alias = .v25, .format = .{ .scalar = .double } };
+ pub const d26: Register = .{ .alias = .v26, .format = .{ .scalar = .double } };
+ pub const d27: Register = .{ .alias = .v27, .format = .{ .scalar = .double } };
+ pub const d28: Register = .{ .alias = .v28, .format = .{ .scalar = .double } };
+ pub const d29: Register = .{ .alias = .v29, .format = .{ .scalar = .double } };
+ pub const d30: Register = .{ .alias = .v30, .format = .{ .scalar = .double } };
+ pub const d31: Register = .{ .alias = .v31, .format = .{ .scalar = .double } };
+
+ pub const s0: Register = .{ .alias = .v0, .format = .{ .scalar = .single } };
+ pub const s1: Register = .{ .alias = .v1, .format = .{ .scalar = .single } };
+ pub const s2: Register = .{ .alias = .v2, .format = .{ .scalar = .single } };
+ pub const s3: Register = .{ .alias = .v3, .format = .{ .scalar = .single } };
+ pub const s4: Register = .{ .alias = .v4, .format = .{ .scalar = .single } };
+    pub const s5: Register = .{ .alias = .v5, .format = .{ .scalar = .single } };
+    pub const s6: Register = .{ .alias = .v6, .format = .{ .scalar = .single } };
+    pub const s7: Register = .{ .alias = .v7, .format = .{ .scalar = .single } };
+    pub const s8: Register = .{ .alias = .v8, .format = .{ .scalar = .single } };
+    pub const s9: Register = .{ .alias = .v9, .format = .{ .scalar = .single } };
+    pub const s10: Register = .{ .alias = .v10, .format = .{ .scalar = .single } };
+    pub const s11: Register = .{ .alias = .v11, .format = .{ .scalar = .single } };
+    pub const s12: Register = .{ .alias = .v12, .format = .{ .scalar = .single } };
+    pub const s13: Register = .{ .alias = .v13, .format = .{ .scalar = .single } };
+    pub const s14: Register = .{ .alias = .v14, .format = .{ .scalar = .single } };
+    pub const s15: Register = .{ .alias = .v15, .format = .{ .scalar = .single } };
+    pub const s16: Register = .{ .alias = .v16, .format = .{ .scalar = .single } };
+    pub const s17: Register = .{ .alias = .v17, .format = .{ .scalar = .single } };
+    pub const s18: Register = .{ .alias = .v18, .format = .{ .scalar = .single } };
+    pub const s19: Register = .{ .alias = .v19, .format = .{ .scalar = .single } };
+    pub const s20: Register = .{ .alias = .v20, .format = .{ .scalar = .single } };
+    pub const s21: Register = .{ .alias = .v21, .format = .{ .scalar = .single } };
+    pub const s22: Register = .{ .alias = .v22, .format = .{ .scalar = .single } };
+    pub const s23: Register = .{ .alias = .v23, .format = .{ .scalar = .single } };
+    pub const s24: Register = .{ .alias = .v24, .format = .{ .scalar = .single } };
+    pub const s25: Register = .{ .alias = .v25, .format = .{ .scalar = .single } };
+    pub const s26: Register = .{ .alias = .v26, .format = .{ .scalar = .single } };
+    pub const s27: Register = .{ .alias = .v27, .format = .{ .scalar = .single } };
+    pub const s28: Register = .{ .alias = .v28, .format = .{ .scalar = .single } };
+    pub const s29: Register = .{ .alias = .v29, .format = .{ .scalar = .single } };
+    pub const s30: Register = .{ .alias = .v30, .format = .{ .scalar = .single } };
+    pub const s31: Register = .{ .alias = .v31, .format = .{ .scalar = .single } };
+
+    // 16-bit (half-precision) scalar hN views of the SIMD&FP registers.
+    pub const h0: Register = .{ .alias = .v0, .format = .{ .scalar = .half } };
+    pub const h1: Register = .{ .alias = .v1, .format = .{ .scalar = .half } };
+    pub const h2: Register = .{ .alias = .v2, .format = .{ .scalar = .half } };
+    pub const h3: Register = .{ .alias = .v3, .format = .{ .scalar = .half } };
+    pub const h4: Register = .{ .alias = .v4, .format = .{ .scalar = .half } };
+    pub const h5: Register = .{ .alias = .v5, .format = .{ .scalar = .half } };
+    pub const h6: Register = .{ .alias = .v6, .format = .{ .scalar = .half } };
+    pub const h7: Register = .{ .alias = .v7, .format = .{ .scalar = .half } };
+    pub const h8: Register = .{ .alias = .v8, .format = .{ .scalar = .half } };
+    pub const h9: Register = .{ .alias = .v9, .format = .{ .scalar = .half } };
+    pub const h10: Register = .{ .alias = .v10, .format = .{ .scalar = .half } };
+    pub const h11: Register = .{ .alias = .v11, .format = .{ .scalar = .half } };
+    pub const h12: Register = .{ .alias = .v12, .format = .{ .scalar = .half } };
+    pub const h13: Register = .{ .alias = .v13, .format = .{ .scalar = .half } };
+    pub const h14: Register = .{ .alias = .v14, .format = .{ .scalar = .half } };
+    pub const h15: Register = .{ .alias = .v15, .format = .{ .scalar = .half } };
+    pub const h16: Register = .{ .alias = .v16, .format = .{ .scalar = .half } };
+    pub const h17: Register = .{ .alias = .v17, .format = .{ .scalar = .half } };
+    pub const h18: Register = .{ .alias = .v18, .format = .{ .scalar = .half } };
+    pub const h19: Register = .{ .alias = .v19, .format = .{ .scalar = .half } };
+    pub const h20: Register = .{ .alias = .v20, .format = .{ .scalar = .half } };
+    pub const h21: Register = .{ .alias = .v21, .format = .{ .scalar = .half } };
+    pub const h22: Register = .{ .alias = .v22, .format = .{ .scalar = .half } };
+    pub const h23: Register = .{ .alias = .v23, .format = .{ .scalar = .half } };
+    pub const h24: Register = .{ .alias = .v24, .format = .{ .scalar = .half } };
+    pub const h25: Register = .{ .alias = .v25, .format = .{ .scalar = .half } };
+    pub const h26: Register = .{ .alias = .v26, .format = .{ .scalar = .half } };
+    pub const h27: Register = .{ .alias = .v27, .format = .{ .scalar = .half } };
+    pub const h28: Register = .{ .alias = .v28, .format = .{ .scalar = .half } };
+    pub const h29: Register = .{ .alias = .v29, .format = .{ .scalar = .half } };
+    pub const h30: Register = .{ .alias = .v30, .format = .{ .scalar = .half } };
+    pub const h31: Register = .{ .alias = .v31, .format = .{ .scalar = .half } };
+
+    // 8-bit scalar bN views of the SIMD&FP registers.
+    pub const b0: Register = .{ .alias = .v0, .format = .{ .scalar = .byte } };
+    pub const b1: Register = .{ .alias = .v1, .format = .{ .scalar = .byte } };
+    pub const b2: Register = .{ .alias = .v2, .format = .{ .scalar = .byte } };
+    pub const b3: Register = .{ .alias = .v3, .format = .{ .scalar = .byte } };
+    pub const b4: Register = .{ .alias = .v4, .format = .{ .scalar = .byte } };
+    pub const b5: Register = .{ .alias = .v5, .format = .{ .scalar = .byte } };
+    pub const b6: Register = .{ .alias = .v6, .format = .{ .scalar = .byte } };
+    pub const b7: Register = .{ .alias = .v7, .format = .{ .scalar = .byte } };
+    pub const b8: Register = .{ .alias = .v8, .format = .{ .scalar = .byte } };
+    pub const b9: Register = .{ .alias = .v9, .format = .{ .scalar = .byte } };
+    pub const b10: Register = .{ .alias = .v10, .format = .{ .scalar = .byte } };
+    pub const b11: Register = .{ .alias = .v11, .format = .{ .scalar = .byte } };
+    pub const b12: Register = .{ .alias = .v12, .format = .{ .scalar = .byte } };
+    pub const b13: Register = .{ .alias = .v13, .format = .{ .scalar = .byte } };
+    pub const b14: Register = .{ .alias = .v14, .format = .{ .scalar = .byte } };
+    pub const b15: Register = .{ .alias = .v15, .format = .{ .scalar = .byte } };
+    pub const b16: Register = .{ .alias = .v16, .format = .{ .scalar = .byte } };
+    pub const b17: Register = .{ .alias = .v17, .format = .{ .scalar = .byte } };
+    pub const b18: Register = .{ .alias = .v18, .format = .{ .scalar = .byte } };
+    pub const b19: Register = .{ .alias = .v19, .format = .{ .scalar = .byte } };
+    pub const b20: Register = .{ .alias = .v20, .format = .{ .scalar = .byte } };
+    pub const b21: Register = .{ .alias = .v21, .format = .{ .scalar = .byte } };
+    pub const b22: Register = .{ .alias = .v22, .format = .{ .scalar = .byte } };
+    pub const b23: Register = .{ .alias = .v23, .format = .{ .scalar = .byte } };
+    pub const b24: Register = .{ .alias = .v24, .format = .{ .scalar = .byte } };
+    pub const b25: Register = .{ .alias = .v25, .format = .{ .scalar = .byte } };
+    pub const b26: Register = .{ .alias = .v26, .format = .{ .scalar = .byte } };
+    pub const b27: Register = .{ .alias = .v27, .format = .{ .scalar = .byte } };
+    pub const b28: Register = .{ .alias = .v28, .format = .{ .scalar = .byte } };
+    pub const b29: Register = .{ .alias = .v29, .format = .{ .scalar = .byte } };
+    pub const b30: Register = .{ .alias = .v30, .format = .{ .scalar = .byte } };
+    pub const b31: Register = .{ .alias = .v31, .format = .{ .scalar = .byte } };
+
+    // Floating-point control and status registers (64-bit integer views).
+    pub const fpcr: Register = .{ .alias = .fpcr, .format = .{ .integer = .doubleword } };
+    pub const fpsr: Register = .{ .alias = .fpsr, .format = .{ .integer = .doubleword } };
+
+    // SVE scalable vector zN views; each zN extends the corresponding vN.
+    pub const z0: Register = .{ .alias = .v0, .format = .{ .scalar = .scalable } };
+    pub const z1: Register = .{ .alias = .v1, .format = .{ .scalar = .scalable } };
+    pub const z2: Register = .{ .alias = .v2, .format = .{ .scalar = .scalable } };
+    pub const z3: Register = .{ .alias = .v3, .format = .{ .scalar = .scalable } };
+    pub const z4: Register = .{ .alias = .v4, .format = .{ .scalar = .scalable } };
+    pub const z5: Register = .{ .alias = .v5, .format = .{ .scalar = .scalable } };
+    pub const z6: Register = .{ .alias = .v6, .format = .{ .scalar = .scalable } };
+    pub const z7: Register = .{ .alias = .v7, .format = .{ .scalar = .scalable } };
+    pub const z8: Register = .{ .alias = .v8, .format = .{ .scalar = .scalable } };
+    pub const z9: Register = .{ .alias = .v9, .format = .{ .scalar = .scalable } };
+    pub const z10: Register = .{ .alias = .v10, .format = .{ .scalar = .scalable } };
+    pub const z11: Register = .{ .alias = .v11, .format = .{ .scalar = .scalable } };
+    pub const z12: Register = .{ .alias = .v12, .format = .{ .scalar = .scalable } };
+    pub const z13: Register = .{ .alias = .v13, .format = .{ .scalar = .scalable } };
+    pub const z14: Register = .{ .alias = .v14, .format = .{ .scalar = .scalable } };
+    pub const z15: Register = .{ .alias = .v15, .format = .{ .scalar = .scalable } };
+    pub const z16: Register = .{ .alias = .v16, .format = .{ .scalar = .scalable } };
+    pub const z17: Register = .{ .alias = .v17, .format = .{ .scalar = .scalable } };
+    pub const z18: Register = .{ .alias = .v18, .format = .{ .scalar = .scalable } };
+    pub const z19: Register = .{ .alias = .v19, .format = .{ .scalar = .scalable } };
+    pub const z20: Register = .{ .alias = .v20, .format = .{ .scalar = .scalable } };
+    pub const z21: Register = .{ .alias = .v21, .format = .{ .scalar = .scalable } };
+    pub const z22: Register = .{ .alias = .v22, .format = .{ .scalar = .scalable } };
+    pub const z23: Register = .{ .alias = .v23, .format = .{ .scalar = .scalable } };
+    pub const z24: Register = .{ .alias = .v24, .format = .{ .scalar = .scalable } };
+    pub const z25: Register = .{ .alias = .v25, .format = .{ .scalar = .scalable } };
+    pub const z26: Register = .{ .alias = .v26, .format = .{ .scalar = .scalable } };
+    pub const z27: Register = .{ .alias = .v27, .format = .{ .scalar = .scalable } };
+    pub const z28: Register = .{ .alias = .v28, .format = .{ .scalar = .scalable } };
+    pub const z29: Register = .{ .alias = .v29, .format = .{ .scalar = .scalable } };
+    pub const z30: Register = .{ .alias = .v30, .format = .{ .scalar = .scalable } };
+    pub const z31: Register = .{ .alias = .v31, .format = .{ .scalar = .scalable } };
+
+    // SVE predicate registers p0-p15, modeled as predicate-format views of
+    // v0-v15 (there are only sixteen predicate registers).
+    pub const p0: Register = .{ .alias = .v0, .format = .{ .scalar = .predicate } };
+    pub const p1: Register = .{ .alias = .v1, .format = .{ .scalar = .predicate } };
+    pub const p2: Register = .{ .alias = .v2, .format = .{ .scalar = .predicate } };
+    pub const p3: Register = .{ .alias = .v3, .format = .{ .scalar = .predicate } };
+    pub const p4: Register = .{ .alias = .v4, .format = .{ .scalar = .predicate } };
+    pub const p5: Register = .{ .alias = .v5, .format = .{ .scalar = .predicate } };
+    pub const p6: Register = .{ .alias = .v6, .format = .{ .scalar = .predicate } };
+    pub const p7: Register = .{ .alias = .v7, .format = .{ .scalar = .predicate } };
+    pub const p8: Register = .{ .alias = .v8, .format = .{ .scalar = .predicate } };
+    pub const p9: Register = .{ .alias = .v9, .format = .{ .scalar = .predicate } };
+    pub const p10: Register = .{ .alias = .v10, .format = .{ .scalar = .predicate } };
+    pub const p11: Register = .{ .alias = .v11, .format = .{ .scalar = .predicate } };
+    pub const p12: Register = .{ .alias = .v12, .format = .{ .scalar = .predicate } };
+    pub const p13: Register = .{ .alias = .v13, .format = .{ .scalar = .predicate } };
+    pub const p14: Register = .{ .alias = .v14, .format = .{ .scalar = .predicate } };
+    pub const p15: Register = .{ .alias = .v15, .format = .{ .scalar = .predicate } };
+
+    // SVE first-fault register (64-bit integer view).
+    pub const ffr: Register = .{ .alias = .ffr, .format = .{ .integer = .doubleword } };
+
+    /// A 5-bit register number as it appears in an instruction encoding.
+    pub const Encoded = enum(u5) {
+        _,
+
+        /// Decodes an integer register number into the view selected by
+        /// `sf_enc`. Register number 31 is special: it decodes to the zero
+        /// register by default, or to the stack pointer when `opts.sp` is
+        /// set (instructions choose one or the other interpretation).
+        pub fn decodeInteger(enc: Encoded, sf_enc: IntegerSize, opts: struct { sp: bool = false }) Register {
+            const n = @intFromEnum(enc);
+            if (n == 31) return switch (sf_enc) {
+                .word => if (opts.sp) .wsp else .wzr,
+                .doubleword => if (opts.sp) .sp else .xzr,
+            };
+            // r0...r30 are contiguous in `Alias`, so numbers 0-30 map
+            // directly; the requested width becomes the format.
+            return .{
+                .alias = @enumFromInt(@intFromEnum(Alias.r0) + n),
+                .format = .{ .integer = sf_enc },
+            };
+        }
+
+        /// Decodes a SIMD&FP/SVE register number into the scalar view
+        /// selected by `vs_enc`.
+        pub fn decodeVector(enc: Encoded, vs_enc: VectorSize) Register {
+            const n = @intFromEnum(enc);
+            // Only p0-p15 exist; a predicate number >= 16 is invalid, just
+            // as it trapped in the exhaustive-table formulation.
+            if (vs_enc == .predicate) assert(n < 16);
+            // v0...v31 are contiguous in `Alias`, so every vector size maps
+            // uniformly: number n with size `vs_enc` is the scalar view of vn.
+            return .{
+                .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+                .format = .{ .scalar = vs_enc },
+            };
+        }
+    };
+
+    /// One tag per set of aliasing registers.
+    pub const Alias = enum(u7) {
+        // Integer register file: r0-r30, plus the two register-number-31
+        // interpretations (zr and sp).
+        r0,
+        r1,
+        r2,
+        r3,
+        r4,
+        r5,
+        r6,
+        r7,
+        r8,
+        r9,
+        r10,
+        r11,
+        r12,
+        r13,
+        r14,
+        r15,
+        r16,
+        r17,
+        r18,
+        r19,
+        r20,
+        r21,
+        r22,
+        r23,
+        r24,
+        r25,
+        r26,
+        r27,
+        r28,
+        r29,
+        r30,
+        zr,
+        sp,
+
+        pc,
+
+        // SIMD&FP/SVE vector register file.
+        v0,
+        v1,
+        v2,
+        v3,
+        v4,
+        v5,
+        v6,
+        v7,
+        v8,
+        v9,
+        v10,
+        v11,
+        v12,
+        v13,
+        v14,
+        v15,
+        v16,
+        v17,
+        v18,
+        v19,
+        v20,
+        v21,
+        v22,
+        v23,
+        v24,
+        v25,
+        v26,
+        v27,
+        v28,
+        v29,
+        v30,
+        v31,
+
+        fpcr,
+        fpsr,
+
+        // SVE predicate registers.
+        p0,
+        p1,
+        p2,
+        p3,
+        p4,
+        p5,
+        p6,
+        p7,
+        p8,
+        p9,
+        p10,
+        p11,
+        p12,
+        p13,
+        p14,
+        p15,
+
+        ffr,
+
+        // Conventional alternative names from the AAPCS64 / procedure-call
+        // standard.
+        pub const ip: Alias = .r16;
+        pub const ip0: Alias = .r16;
+        pub const ip1: Alias = .r17;
+        pub const fp: Alias = .r29;
+        pub const lr: Alias = .r30;
+
+        /// Size-agnostic (alias-format) view of an integer register or pc.
+        pub fn r(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.r0) and @intFromEnum(ra) <= @intFromEnum(Alias.pc));
+            return .{ .alias = ra, .format = .alias };
+        }
+        /// 64-bit integer (xN) view; also valid for zr and sp.
+        pub fn x(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.r0) and @intFromEnum(ra) <= @intFromEnum(Alias.sp));
+            return .{ .alias = ra, .format = .{ .integer = .doubleword } };
+        }
+        /// 32-bit integer (wN) view; also valid for zr and sp.
+        pub fn w(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.r0) and @intFromEnum(ra) <= @intFromEnum(Alias.sp));
+            return .{ .alias = ra, .format = .{ .integer = .word } };
+        }
+        /// Size-agnostic (alias-format) view of a vector register.
+        pub fn v(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .alias };
+        }
+        /// 128-bit scalar (qN) view of a vector register.
+        pub fn q(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .scalar = .quad } };
+        }
+        /// 64-bit scalar (dN) view of a vector register.
+        pub fn d(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .scalar = .double } };
+        }
+        /// 32-bit scalar (sN) view of a vector register.
+        pub fn s(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .scalar = .single } };
+        }
+        /// 16-bit scalar (hN) view of a vector register.
+        pub fn h(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .scalar = .half } };
+        }
+        /// 8-bit scalar (bN) view of a vector register.
+        pub fn b(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .scalar = .byte } };
+        }
+        /// SVE scalable vector (zN) view of a vector register.
+        pub fn z(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .scalar = .scalable } };
+        }
+        /// SVE predicate (pN) view; valid only for the p0-p15 tags.
+        pub fn p(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.p0) and @intFromEnum(ra) <= @intFromEnum(Alias.p15));
+            return .{ .alias = ra, .format = .{ .scalar = .predicate } };
+        }
+        // Vector arrangement views (e.g. v0.2d), full 128-bit arrangements
+        // first, then the 64-bit ones.
+        pub fn @"2d"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"2d" } };
+        }
+        pub fn @"4s"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"4s" } };
+        }
+        pub fn @"8h"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"8h" } };
+        }
+        pub fn @"16b"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"16b" } };
+        }
+        pub fn @"1d"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"1d" } };
+        }
+        pub fn @"2s"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"2s" } };
+        }
+        pub fn @"4h"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"4h" } };
+        }
+        pub fn @"8b"(ra: Alias) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .vector = .@"8b" } };
+        }
+        // Element views (e.g. v0.d[1]); the index type narrows with the
+        // element size so every index is in range by construction.
+        pub fn @"d[]"(ra: Alias, index: u1) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .element = .{ .size = .double, .index = index } } };
+        }
+        pub fn @"s[]"(ra: Alias, index: u2) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .element = .{ .size = .single, .index = index } } };
+        }
+        pub fn @"h[]"(ra: Alias, index: u3) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .element = .{ .size = .half, .index = index } } };
+        }
+        pub fn @"b[]"(ra: Alias, index: u4) Register {
+            assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+            return .{ .alias = ra, .format = .{ .element = .{ .size = .byte, .index = index } } };
+        }
+
+        /// Whether this alias names a SIMD&FP/SVE register (vN or pN).
+        pub fn isVector(ra: Alias) bool {
+            return switch (ra) {
+                .r0,
+                .r1,
+                .r2,
+                .r3,
+                .r4,
+                .r5,
+                .r6,
+                .r7,
+                .r8,
+                .r9,
+                .r10,
+                .r11,
+                .r12,
+                .r13,
+                .r14,
+                .r15,
+                .r16,
+                .r17,
+                .r18,
+                .r19,
+                .r20,
+                .r21,
+                .r22,
+                .r23,
+                .r24,
+                .r25,
+                .r26,
+                .r27,
+                .r28,
+                .r29,
+                .r30,
+                .zr,
+                .sp,
+
+                .pc,
+
+                .fpcr,
+                .fpsr,
+
+                .ffr,
+                => false,
+
+                .v0,
+                .v1,
+                .v2,
+                .v3,
+                .v4,
+                .v5,
+                .v6,
+                .v7,
+                .v8,
+                .v9,
+                .v10,
+                .v11,
+                .v12,
+                .v13,
+                .v14,
+                .v15,
+                .v16,
+                .v17,
+                .v18,
+                .v19,
+                .v20,
+                .v21,
+                .v22,
+                .v23,
+                .v24,
+                .v25,
+                .v26,
+                .v27,
+                .v28,
+                .v29,
+                .v30,
+                .v31,
+
+                .p0,
+                .p1,
+                .p2,
+                .p3,
+                .p4,
+                .p5,
+                .p6,
+                .p7,
+                .p8,
+                .p9,
+                .p10,
+                .p11,
+                .p12,
+                .p13,
+                .p14,
+                .p15,
+                => true,
+            };
+        }
+
+        /// Returns the 5-bit instruction encoding of this alias.
+        /// `opts.V` selects the SIMD&FP register file (vN only); otherwise
+        /// the integer file is used, where encoding 31 means sp when
+        /// `opts.sp` is set and zr when it is not. Aliases not encodable
+        /// under the given options are unreachable.
+        pub fn encode(ra: Alias, comptime opts: struct { sp: bool = false, V: bool = false }) Encoded {
+            return @enumFromInt(@as(u5, switch (ra) {
+                .r0 => if (opts.V) unreachable else 0,
+                .r1 => if (opts.V) unreachable else 1,
+                .r2 => if (opts.V) unreachable else 2,
+                .r3 => if (opts.V) unreachable else 3,
+                .r4 => if (opts.V) unreachable else 4,
+                .r5 => if (opts.V) unreachable else 5,
+                .r6 => if (opts.V) unreachable else 6,
+                .r7 => if (opts.V) unreachable else 7,
+                .r8 => if (opts.V) unreachable else 8,
+                .r9 => if (opts.V) unreachable else 9,
+                .r10 => if (opts.V) unreachable else 10,
+                .r11 => if (opts.V) unreachable else 11,
+                .r12 => if (opts.V) unreachable else 12,
+                .r13 => if (opts.V) unreachable else 13,
+                .r14 => if (opts.V) unreachable else 14,
+                .r15 => if (opts.V) unreachable else 15,
+                .r16 => if (opts.V) unreachable else 16,
+                .r17 => if (opts.V) unreachable else 17,
+                .r18 => if (opts.V) unreachable else 18,
+                .r19 => if (opts.V) unreachable else 19,
+                .r20 => if (opts.V) unreachable else 20,
+                .r21 => if (opts.V) unreachable else 21,
+                .r22 => if (opts.V) unreachable else 22,
+                .r23 => if (opts.V) unreachable else 23,
+                .r24 => if (opts.V) unreachable else 24,
+                .r25 => if (opts.V) unreachable else 25,
+                .r26 => if (opts.V) unreachable else 26,
+                .r27 => if (opts.V) unreachable else 27,
+                .r28 => if (opts.V) unreachable else 28,
+                .r29 => if (opts.V) unreachable else 29,
+                .r30 => if (opts.V) unreachable else 30,
+                // zr and sp share encoding 31; exactly one is valid per
+                // instruction, chosen by `opts.sp`.
+                .zr => if (opts.sp or opts.V) unreachable else 31,
+                .sp => if (opts.sp and !opts.V) 31 else unreachable,
+                .pc => unreachable,
+                .v0 => if (opts.V) 0 else unreachable,
+                .v1 => if (opts.V) 1 else unreachable,
+                .v2 => if (opts.V) 2 else unreachable,
+                .v3 => if (opts.V) 3 else unreachable,
+                .v4 => if (opts.V) 4 else unreachable,
+                .v5 => if (opts.V) 5 else unreachable,
+                .v6 => if (opts.V) 6 else unreachable,
+                .v7 => if (opts.V) 7 else unreachable,
+                .v8 => if (opts.V) 8 else unreachable,
+                .v9 => if (opts.V) 9 else unreachable,
+                .v10 => if (opts.V) 10 else unreachable,
+                .v11 => if (opts.V) 11 else unreachable,
+                .v12 => if (opts.V) 12 else unreachable,
+                .v13 => if (opts.V) 13 else unreachable,
+                .v14 => if (opts.V) 14 else unreachable,
+                .v15 => if (opts.V) 15 else unreachable,
+                .v16 => if (opts.V) 16 else unreachable,
+                .v17 => if (opts.V) 17 else unreachable,
+                .v18 => if (opts.V) 18 else unreachable,
+                .v19 => if (opts.V) 19 else unreachable,
+                .v20 => if (opts.V) 20 else unreachable,
+                .v21 => if (opts.V) 21 else unreachable,
+                .v22 => if (opts.V) 22 else unreachable,
+                .v23 => if (opts.V) 23 else unreachable,
+                .v24 => if (opts.V) 24 else unreachable,
+                .v25 => if (opts.V) 25 else unreachable,
+                .v26 => if (opts.V) 26 else unreachable,
+                .v27 => if (opts.V) 27 else unreachable,
+                .v28 => if (opts.V) 28 else unreachable,
+                .v29 => if (opts.V) 29 else unreachable,
+                .v30 => if (opts.V) 30 else unreachable,
+                .v31 => if (opts.V) 31 else unreachable,
+                // These registers are accessed by dedicated instructions,
+                // not by a 5-bit register-number field.
+                .fpcr, .fpsr => unreachable,
+                .p0, .p1, .p2, .p3, .p4, .p5, .p6, .p7, .p8, .p9, .p10, .p11, .p12, .p13, .p14, .p15 => unreachable,
+                .ffr => unreachable,
+            }));
+        }
+    };
+
+    /// Whether `reg` names a SIMD&FP/SVE (vector or predicate) register.
+    pub fn isVector(reg: Register) bool {
+        return reg.alias.isVector();
+    }
+
+    /// Returns the size in bytes of this register view, or null for
+    /// formats with no fixed architectural size (SVE scalable vectors and
+    /// predicates). Asserts the format is not `.alias`.
+    pub fn size(reg: Register) ?u5 {
+        return format: switch (reg.format) {
+            // An alias-format register has no inherent width.
+            .alias => unreachable,
+            .integer => |sf| switch (sf) {
+                .word => 4,
+                .doubleword => 8,
+            },
+            // NOTE(review): this payload's 2-byte size is spelled `.word`
+            // here while the scalar constants above use `.half` — confirm
+            // against the Format/size enum declarations (out of view).
+            .vector => |vs| switch (vs) {
+                .byte => 1,
+                .word => 2,
+                .single => 4,
+                .double => 8,
+                .quad => 16,
+                .scalable, .predicate => null,
+            },
+            .arrangement => |arrangement| switch (arrangement) {
+                .@"2d", .@"4s", .@"8h", .@"16b" => 16,
+                .@"1d", .@"2s", .@"4h", .@"8b" => 8,
+            },
+            // An element view is as wide as its element size; re-dispatch
+            // through the labeled switch with the element's size.
+            .element => |element| continue :format .{ .vector = element.size },
+        };
+    }
+
+    /// Parses a register name, case-insensitively, into a `Register`:
+    /// numbered names ("r0"-"r30", "x0"-"x30", "w0"-"w30", "v0"-"v31",
+    /// "q0"/"d0"/"s0"/"h0"/"b0"-numbered scalar views) and the special
+    /// names "xzr", "wzr", "wsp", "sp", "ip"/"ip0"/"ip1", "fp", "pc".
+    /// Returns null for anything unrecognized (including "r31"/"x31"/"w31").
+    pub fn parse(reg: []const u8) ?Register {
+        return if (reg.len == 0) null else switch (std.ascii.toLower(reg[0])) {
+            else => null,
+            // Size-agnostic integer register names; number 31 has no "r" name.
+            'r' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| switch (n) {
+                0...30 => .{
+                    .alias = @enumFromInt(@intFromEnum(Alias.r0) + n),
+                    .format = .alias,
+                },
+                31 => null,
+            } else |_| null,
+            'x' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| switch (n) {
+                0...30 => .{
+                    .alias = @enumFromInt(@intFromEnum(Alias.r0) + n),
+                    .format = .{ .integer = .doubleword },
+                },
+                31 => null,
+            } else |_| if (toLowerEqlAssertLower(reg, "xzr")) .xzr else null,
+            'w' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| switch (n) {
+                0...30 => .{
+                    .alias = @enumFromInt(@intFromEnum(Alias.r0) + n),
+                    .format = .{ .integer = .word },
+                },
+                31 => null,
+            } else |_| if (toLowerEqlAssertLower(reg, "wzr"))
+                .wzr
+            else if (toLowerEqlAssertLower(reg, "wsp"))
+                .wsp
+            else
+                null,
+            // Procedure-call-standard names: "ip" is a synonym for "ip0".
+            'i' => return if (toLowerEqlAssertLower(reg, "ip") or toLowerEqlAssertLower(reg, "ip0"))
+                .ip0
+            else if (toLowerEqlAssertLower(reg, "ip1"))
+                .ip1
+            else
+                null,
+            'f' => return if (toLowerEqlAssertLower(reg, "fp")) .fp else null,
+            'p' => return if (toLowerEqlAssertLower(reg, "pc")) .pc else null,
+            // Vector register names; parseInt into u5 bounds n to 0...31.
+            'v' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+                .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+                .format = .alias,
+            } else |_| null,
+            'q' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+                .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+                .format = .{ .scalar = .quad },
+            } else |_| null,
+            'd' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+                .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+                .format = .{ .scalar = .double },
+            } else |_| null,
+            's' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+                .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+                .format = .{ .scalar = .single },
+            } else |_| if (toLowerEqlAssertLower(reg, "sp")) .sp else null,
+            'h' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+                .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+                .format = .{ .scalar = .half },
+            } else |_| null,
+            'b' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+                .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+                .format = .{ .scalar = .byte },
+            } else |_| null,
+        };
+    }
+
+    /// Formats `reg` in lowercase.
+    pub fn fmt(reg: Register) aarch64.Disassemble.RegisterFormatter {
+        return reg.fmtCase(.lower);
+    }
+    /// Formats `reg` using the requested letter case.
+    pub fn fmtCase(reg: Register, case: aarch64.Disassemble.Case) aarch64.Disassemble.RegisterFormatter {
+        return .{ .reg = reg, .case = case };
+    }
+
+ pub const System = packed struct(u16) {
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ op0: u2,
+
+ // D19.2 General system control registers
+ /// D19.2.1 ACCDATA_EL1, Accelerator Data
+ pub const accdata_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b101 };
+ /// D19.2.2 ACTLR_EL1, Auxiliary Control Register (EL1)
+ pub const actlr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.3 ACTLR_EL2, Auxiliary Control Register (EL2)
+ pub const actlr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.4 ACTLR_EL3, Auxiliary Control Register (EL3)
+ pub const actlr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.5 AFSR0_EL1, Auxiliary Fault Status Register 0 (EL1)
+ pub const afsr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.5 AFSR0_EL12, Auxiliary Fault Status Register 0 (EL12)
+ pub const afsr0_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.6 AFSR0_EL2, Auxiliary Fault Status Register 0 (EL2)
+ pub const afsr0_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.7 AFSR0_EL3, Auxiliary Fault Status Register 0 (EL3)
+ pub const afsr0_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.8 AFSR1_EL1, Auxiliary Fault Status Register 1 (EL1)
+ pub const afsr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b001 };
+ /// D19.2.8 AFSR1_EL12, Auxiliary Fault Status Register 1 (EL12)
+ pub const afsr1_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b001 };
+ /// D19.2.9 AFSR1_EL2, Auxiliary Fault Status Register 1 (EL2)
+ pub const afsr1_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b001 };
+ /// D19.2.10 AFSR1_EL3, Auxiliary Fault Status Register 1 (EL3)
+ pub const afsr1_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0101, .CRm = 0b0001, .op2 = 0b001 };
+ /// D19.2.11 AIDR_EL1, Auxiliary ID Register
+ pub const aidr_el1: System = .{ .op0 = 0b11, .op1 = 0b001, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b111 };
+ /// D19.2.12 AMAIR_EL1, Auxiliary Memory Attribute Indirection Register (EL1)
+ pub const amair_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1010, .CRm = 0b0011, .op2 = 0b000 };
+ /// D19.2.12 AMAIR_EL12, Auxiliary Memory Attribute Indirection Register (EL12)
+ pub const amair_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b1010, .CRm = 0b0011, .op2 = 0b000 };
+ /// D19.2.13 AMAIR_EL2, Auxiliary Memory Attribute Indirection Register (EL2)
+ pub const amair_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1010, .CRm = 0b0011, .op2 = 0b000 };
+ /// D19.2.14 AMAIR_EL3, Auxiliary Memory Attribute Indirection Register (EL3)
+ pub const amair_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b1010, .CRm = 0b0011, .op2 = 0b000 };
+ /// D19.2.15 APDAKeyHi_EL1, Pointer Authentication Key A for Data (bits[127:64])
+ pub const apdakeyhi_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0010, .op2 = 0b001 };
+ /// D19.2.16 APDAKeyLo_EL1, Pointer Authentication Key A for Data (bits[63:0])
+ pub const apdakeylo_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.17 APDBKeyHi_EL1, Pointer Authentication Key B for Data (bits[127:64])
+ pub const apdbkeyhi_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0010, .op2 = 0b011 };
+        /// D19.2.18 APDBKeyLo_EL1, Pointer Authentication Key B for Data (bits[63:0])
+ pub const apdbkeylo_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0010, .op2 = 0b010 };
+ /// D19.2.19 APGAKeyHi_EL1, Pointer Authentication Key A for Code (bits[127:64])
+ pub const apgakeyhi_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0011, .op2 = 0b001 };
+ /// D19.2.20 APGAKeyLo_EL1, Pointer Authentication Key A for Code (bits[63:0])
+ pub const apgakeylo_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0011, .op2 = 0b000 };
+ /// D19.2.21 APIAKeyHi_EL1, Pointer Authentication Key A for Instruction (bits[127:64])
+ pub const apiakeyhi_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0001, .op2 = 0b001 };
+ /// D19.2.22 APIAKeyLo_EL1, Pointer Authentication Key A for Instruction (bits[63:0])
+ pub const apiakeylo_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.23 APIBKeyHi_EL1, Pointer Authentication Key B for Instruction (bits[127:64])
+ pub const apibkeyhi_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0001, .op2 = 0b011 };
+ /// D19.2.24 APIBKeyLo_EL1, Pointer Authentication Key B for Instruction (bits[63:0])
+ pub const apibkeylo_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0001, .op2 = 0b010 };
+ /// D19.2.25 CCSIDR2_EL1, Current Cache Size ID Register 2
+ pub const ccsidr2_el1: System = .{ .op0 = 0b11, .op1 = 0b001, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.26 CCSIDR_EL1, Current Cache Size ID Register
+ pub const ccsidr_el1: System = .{ .op0 = 0b11, .op1 = 0b001, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.27 CLIDR_EL1, Cache Level ID Register
+ pub const clidr_el1: System = .{ .op0 = 0b11, .op1 = 0b001, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.28 CONTEXTIDR_EL1, Context ID Register (EL1)
+ pub const contextidr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.28 CONTEXTIDR_EL12, Context ID Register (EL12)
+ pub const contextidr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.29 CONTEXTIDR_EL2, Context ID Register (EL2)
+ pub const contextidr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.30 CPACR_EL1, Architectural Feature Access Control Register
+ pub const cpacr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.30 CPACR_EL12, Architectural Feature Access Control Register
+ pub const cpacr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.31 CPTR_EL2, Architectural Feature Trap Register (EL2)
+ pub const cptr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b010 };
+ /// D19.2.32 CPTR_EL3, Architectural Feature Trap Register (EL3)
+ pub const cptr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b010 };
+ /// D19.2.33 CSSELR_EL1, Cache Size Selection Register
+ pub const csselr_el1: System = .{ .op0 = 0b11, .op1 = 0b010, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.34 CTR_EL0, Cache Type Register
+ pub const ctr_el0: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.35 DACR32_EL2, Domain Access Control Register
+ pub const dacr32_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0011, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.36 DCZID_EL0, Data Cache Zero ID Register
+ pub const dczid_el0: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b111 };
+ /// D19.2.37 ESR_EL1, Exception Syndrome Register (EL1)
+ pub const esr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0101, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.37 ESR_EL12, Exception Syndrome Register (EL12)
+ pub const esr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0101, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.38 ESR_EL2, Exception Syndrome Register (EL2)
+ pub const esr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0101, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.39 ESR_EL3, Exception Syndrome Register (EL3)
+ pub const esr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0101, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.40 FAR_EL1, Fault Address Register (EL1)
+ pub const far_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0110, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.40 FAR_EL12, Fault Address Register (EL12)
+ pub const far_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0110, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.41 FAR_EL2, Fault Address Register (EL2)
+ pub const far_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0110, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.42 FAR_EL3, Fault Address Register (EL3)
+ pub const far_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0110, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.43 FPEXC32_EL2, Floating-Point Exception Control Register
+ pub const fpexc32_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0101, .CRm = 0b0011, .op2 = 0b000 };
+ /// D19.2.44 GCR_EL1, Tag Control Register
+ pub const gcr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b110 };
+ /// D19.2.45 GMID_EL1, Multiple tag transfer ID Register
+ pub const gmid_el1: System = .{ .op0 = 0b11, .op1 = 0b001, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b100 };
+ /// D19.2.46 HACR_EL2, Hypervisor Auxiliary Control Register
+ pub const hacr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b111 };
+ /// D19.2.47 HAFGRTR_EL2, Hypervisor Activity Monitors Fine-Grained Read Trap Register
+ pub const hafgrtr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0011, .CRm = 0b0001, .op2 = 0b110 };
+ /// D19.2.48 HCR_EL2, Hypervisor Configuration Register
+ pub const hcr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.49 HCRX_EL2, Extended Hypervisor Configuration Register
+ pub const hcrx_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b010 };
+ /// D19.2.50 HDFGRTR_EL2, Hypervisor Debug Fine-Grained Read Trap Register
+ pub const hdfgrtr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0011, .CRm = 0b0001, .op2 = 0b100 };
+ /// D19.2.51 HDFGWTR_EL2, Hypervisor Debug Fine-Grained Write Trap Register
+ pub const hdfgwtr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0011, .CRm = 0b0001, .op2 = 0b101 };
+ /// D19.2.52 HFGITR_EL2, Hypervisor Fine-Grained Instruction Trap Register
+ pub const hfgitr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b110 };
+ /// D19.2.53 HFGRTR_EL2, Hypervisor Fine-Grained Read Trap Register
+ pub const hfgrtr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b100 };
+ /// D19.2.54 HFGWTR_EL2, Hypervisor Fine-Grained Write Trap Register
+ pub const hfgwtr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b101 };
+ /// D19.2.55 HPFAR_EL2, Hypervisor IPA Fault Address Register
+ pub const hpfar_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0110, .CRm = 0b0000, .op2 = 0b100 };
+ /// D19.2.56 HSTR_EL2, Hypervisor System Trap Register
+ pub const hstr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b011 };
+ /// D19.2.57 ID_AA64AFR0_EL1, AArch64 Auxiliary Feature Register 0
+ pub const id_aa64afr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0101, .op2 = 0b100 };
+ /// D19.2.58 ID_AA64AFR1_EL1, AArch64 Auxiliary Feature Register 1
+ pub const id_aa64afr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0101, .op2 = 0b101 };
+ /// D19.2.59 ID_AA64DFR0_EL1, AArch64 Debug Feature Register 0
+ pub const id_aa64dfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0101, .op2 = 0b000 };
+ /// D19.2.60 ID_AA64DFR1_EL1, AArch64 Debug Feature Register 1
+ pub const id_aa64dfr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0101, .op2 = 0b001 };
+ /// D19.2.61 ID_AA64ISAR0_EL1, AArch64 Instruction Set Attribute Register 0
+ pub const id_aa64isar0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0110, .op2 = 0b000 };
+ /// D19.2.62 ID_AA64ISAR1_EL1, AArch64 Instruction Set Attribute Register 1
+ pub const id_aa64isar1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0110, .op2 = 0b001 };
+ /// D19.2.63 ID_AA64ISAR2_EL1, AArch64 Instruction Set Attribute Register 2
+ pub const id_aa64isar2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0110, .op2 = 0b010 };
+ /// D19.2.64 ID_AA64MMFR0_EL1, AArch64 Memory Model Feature Register 0
+ pub const id_aa64mmfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0111, .op2 = 0b000 };
+ /// D19.2.65 ID_AA64MMFR1_EL1, AArch64 Memory Model Feature Register 1
+ pub const id_aa64mmfr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0111, .op2 = 0b001 };
+ /// D19.2.66 ID_AA64MMFR2_EL1, AArch64 Memory Model Feature Register 2
+ pub const id_aa64mmfr2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0111, .op2 = 0b010 };
+ /// D19.2.67 ID_AA64MMFR3_EL1, AArch64 Memory Model Feature Register 3
+ pub const id_aa64mmfr3_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0111, .op2 = 0b011 };
+ /// D19.2.68 ID_AA64MMFR4_EL1, AArch64 Memory Model Feature Register 4
+ pub const id_aa64mmfr4_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0111, .op2 = 0b100 };
+ /// D19.2.69 ID_AA64PFR0_EL1, AArch64 Processor Feature Register 0
+ pub const id_aa64pfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0100, .op2 = 0b000 };
+ /// D19.2.70 ID_AA64PFR1_EL1, AArch64 Processor Feature Register 1
+ pub const id_aa64pfr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0100, .op2 = 0b001 };
+ /// D19.2.71 ID_AA64PFR2_EL1, AArch64 Processor Feature Register 2
+ pub const id_aa64pfr2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0100, .op2 = 0b010 };
+ /// D19.2.72 ID_AA64SMFR0_EL1, SME Feature ID Register 0
+ pub const id_aa64smfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0100, .op2 = 0b101 };
+ /// D19.2.73 ID_AA64ZFR0_EL1, SVE Feature ID Register 0
+ pub const id_aa64zfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0100, .op2 = 0b100 };
+ /// D19.2.74 ID_AFR0_EL1, AArch32 Auxiliary Feature Register 0
+ pub const id_afr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b011 };
+ /// D19.2.75 ID_DFR0_EL1, AArch32 Debug Feature Register 0
+ pub const id_dfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b010 };
+ /// D19.2.76 ID_DFR1_EL1, AArch32 Debug Feature Register 1
+ pub const id_dfr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0011, .op2 = 0b101 };
+ /// D19.2.77 ID_ISAR0_EL1, AArch32 Instruction Set Attribute Register 0
+ pub const id_isar0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.78 ID_ISAR1_EL1, AArch32 Instruction Set Attribute Register 1
+ pub const id_isar1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b001 };
+ /// D19.2.79 ID_ISAR2_EL1, AArch32 Instruction Set Attribute Register 2
+ pub const id_isar2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b010 };
+ /// D19.2.80 ID_ISAR3_EL1, AArch32 Instruction Set Attribute Register 3
+ pub const id_isar3_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b011 };
+ /// D19.2.81 ID_ISAR4_EL1, AArch32 Instruction Set Attribute Register 4
+ pub const id_isar4_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b100 };
+ /// D19.2.82 ID_ISAR5_EL1, AArch32 Instruction Set Attribute Register 5
+ pub const id_isar5_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b101 };
+ /// D19.2.83 ID_ISAR6_EL1, AArch32 Instruction Set Attribute Register 6
+ pub const id_isar6_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b111 };
+ /// D19.2.84 ID_MMFR0_EL1, AArch32 Memory Model Feature Register 0
+ pub const id_mmfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b100 };
+ /// D19.2.85 ID_MMFR1_EL1, AArch32 Memory Model Feature Register 1
+ pub const id_mmfr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b101 };
+ /// D19.2.86 ID_MMFR2_EL1, AArch32 Memory Model Feature Register 2
+ pub const id_mmfr2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b110 };
+ /// D19.2.87 ID_MMFR3_EL1, AArch32 Memory Model Feature Register 3
+ pub const id_mmfr3_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b111 };
+ /// D19.2.88 ID_MMFR4_EL1, AArch32 Memory Model Feature Register 4
+ pub const id_mmfr4_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0010, .op2 = 0b110 };
+ /// D19.2.89 ID_MMFR5_EL1, AArch32 Memory Model Feature Register 5
+ pub const id_mmfr5_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0011, .op2 = 0b110 };
+ /// D19.2.90 ID_PFR0_EL1, AArch32 Processor Feature Register 0
+ pub const id_pfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.91 ID_PFR1_EL1, AArch32 Processor Feature Register 1
+ pub const id_pfr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0001, .op2 = 0b001 };
+ /// D19.2.92 ID_PFR2_EL1, AArch32 Processor Feature Register 2
+ pub const id_pfr2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0011, .op2 = 0b100 };
+ /// D19.2.93 IFSR32_EL2, Instruction Fault Status Register (EL2)
+ pub const ifsr32_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0101, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.94 ISR_EL1, Interrupt Status Register
+ pub const isr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1100, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.95 LORC_EL1, LORegion Control (EL1)
+ pub const lorc_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1010, .CRm = 0b0100, .op2 = 0b011 };
+ /// D19.2.96 LOREA_EL1, LORegion End Address (EL1)
+ pub const lorea_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1010, .CRm = 0b0100, .op2 = 0b001 };
+ /// D19.2.97 LORID_EL1, LORegionID (EL1)
+ pub const lorid_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1010, .CRm = 0b0100, .op2 = 0b111 };
+ /// D19.2.98 LORN_EL1, LORegion Number (EL1)
+ pub const lorn_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1010, .CRm = 0b0100, .op2 = 0b010 };
+ /// D19.2.99 LORSA_EL1, LORegion Start Address (EL1)
+ pub const lorsa_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1010, .CRm = 0b0100, .op2 = 0b000 };
+ /// D19.2.100 MAIR_EL1, Memory Attribute Indirection Register (EL1)
+ pub const mair_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1010, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.100 MAIR_EL12, Memory Attribute Indirection Register (EL12)
+ pub const mair_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b1010, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.101 MAIR_EL2, Memory Attribute Indirection Register (EL2)
+ pub const mair_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1010, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.102 MAIR_EL3, Memory Attribute Indirection Register (EL3)
+ pub const mair_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b1010, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.103 MIDR_EL1, Main ID Register
+ pub const midr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.104 MPIDR_EL1, Multiprocessor Affinity Register
+ pub const mpidr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b101 };
+ /// D19.2.105 MVFR0_EL1, AArch32 Media and VFP Feature Register 0
+ pub const mvfr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0011, .op2 = 0b000 };
+ /// D19.2.106 MVFR1_EL1, AArch32 Media and VFP Feature Register 1
+ pub const mvfr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0011, .op2 = 0b001 };
+ /// D19.2.107 MVFR2_EL1, AArch32 Media and VFP Feature Register 2
+ pub const mvfr2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0011, .op2 = 0b010 };
+ /// D19.2.108 PAR_EL1, Physical Address Register
+ pub const par_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0111, .CRm = 0b0100, .op2 = 0b000 };
+ /// D19.2.109 REVIDR_EL1, Revision ID Register
+ pub const revidr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b110 };
+ /// D19.2.110 RGSR_EL1, Random Allocation Tag Seed Register
+ pub const rgsr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b101 };
+ /// D19.2.111 RMR_EL1, Reset Management Register (EL1)
+ pub const rmr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.112 RMR_EL2, Reset Management Register (EL2)
+ pub const rmr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.113 RMR_EL3, Reset Management Register (EL3)
+ pub const rmr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.114 RNDR, Random Number
+ pub const rndr: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b0010, .CRm = 0b0100, .op2 = 0b000 };
+ /// D19.2.115 RNDRRS, Reseeded Random Number
+ pub const rndrrs: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b0010, .CRm = 0b0100, .op2 = 0b001 };
+ /// D19.2.116 RVBAR_EL1, Reset Vector Base Address Register (if EL2 and EL3 not implemented)
+ pub const rvbar_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.117 RVBAR_EL2, Reset Vector Base Address Register (if EL3 not implemented)
+ pub const rvbar_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.118 RVBAR_EL3, Reset Vector Base Address Register (if EL3 implemented)
+ pub const rvbar_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.120 SCR_EL3, Secure Configuration Register
+ pub const scr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0001, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.121 SCTLR2_EL1, System Control Register (EL1)
+ pub const sctlr2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b011 };
+ /// D19.2.121 SCTLR2_EL12, System Control Register (EL12)
+ pub const sctlr2_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b011 };
+ /// D19.2.122 SCTLR2_EL2, System Control Register (EL2)
+ pub const sctlr2_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b011 };
+ /// D19.2.123 SCTLR2_EL3, System Control Register (EL3)
+ pub const sctlr2_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b011 };
+ /// D19.2.124 SCTLR_EL1, System Control Register (EL1)
+ pub const sctlr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.124 SCTLR_EL12, System Control Register (EL12)
+ pub const sctlr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.125 SCTLR_EL2, System Control Register (EL2)
+ pub const sctlr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.126 SCTLR_EL3, System Control Register (EL3)
+ pub const sctlr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0001, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.127 SCXTNUM_EL0, EL0 Read/Write Software Context Number
+ pub const scxtnum_el0: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b111 };
+ /// D19.2.128 SCXTNUM_EL1, EL1 Read/Write Software Context Number
+ pub const scxtnum_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b111 };
+ /// D19.2.128 SCXTNUM_EL12, EL12 Read/Write Software Context Number
+ pub const scxtnum_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b111 };
+ /// D19.2.129 SCXTNUM_EL2, EL2 Read/Write Software Context Number
+ pub const scxtnum_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b111 };
+ /// D19.2.130 SCXTNUM_EL3, EL3 Read/Write Software Context Number
+ pub const scxtnum_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b111 };
+ /// D19.2.131 SMCR_EL1, SME Control Register (EL1)
+ pub const smcr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b110 };
+ /// D19.2.131 SMCR_EL12, SME Control Register (EL12)
+ pub const smcr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b110 };
+ /// D19.2.132 SMCR_EL2, SME Control Register (EL2)
+ pub const smcr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b110 };
+ /// D19.2.133 SMCR_EL3, SME Control Register (EL3)
+ pub const smcr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b110 };
+ /// D19.2.134 SMIDR_EL1, Streaming Mode Identification Register
+ pub const smidr_el1: System = .{ .op0 = 0b11, .op1 = 0b001, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b110 };
+ /// D19.2.135 SMPRIMAP_EL2, Streaming Mode Priority Mapping Register
+ pub const smprimap_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b101 };
+ /// D19.2.136 SMPRI_EL1, Streaming Mode Priority Register
+ pub const smpri_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b100 };
+ /// D19.2.137 TCR2_EL1, Extended Translation Control Register (EL1)
+ pub const tcr2_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b011 };
+ /// D19.2.137 TCR2_EL12, Extended Translation Control Register (EL12)
+ pub const tcr2_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b011 };
+ /// D19.2.138 TCR2_EL2, Extended Translation Control Register (EL2)
+ pub const tcr2_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b011 };
+ /// D19.2.139 TCR_EL1, Translation Control Register (EL1)
+ pub const tcr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.139 TCR_EL12, Translation Control Register (EL12)
+ pub const tcr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.140 TCR_EL2, Translation Control Register (EL2)
+ pub const tcr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.141 TCR_EL3, Translation Control Register (EL3)
+ pub const tcr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.142 TFSRE0_EL1, Tag Fault Status Register (EL0)
+ pub const tfsre0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0101, .CRm = 0b0110, .op2 = 0b001 };
+ /// D19.2.143 TFSR_EL1, Tag Fault Status Register (EL1)
+ pub const tfsr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0101, .CRm = 0b0110, .op2 = 0b000 };
+ /// D19.2.143 TFSR_EL12, Tag Fault Status Register (EL12)
+ pub const tfsr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0101, .CRm = 0b0110, .op2 = 0b000 };
+ /// D19.2.144 TFSR_EL2, Tag Fault Status Register (EL2)
+ pub const tfsr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0101, .CRm = 0b0110, .op2 = 0b000 };
+ /// D19.2.145 TFSR_EL3, Tag Fault Status Register (EL3)
+ pub const tfsr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0101, .CRm = 0b0110, .op2 = 0b000 };
+ /// D19.2.146 TPIDR2_EL0, EL0 Read/Write Software Thread ID Register 2
+ pub const tpidr2_el0: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b101 };
+ /// D19.2.147 TPIDR_EL0, EL0 Read/Write Software Thread ID Register
+ pub const tpidr_el0: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.148 TPIDR_EL1, EL1 Read/Write Software Thread ID Register
+ pub const tpidr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b100 };
+ /// D19.2.149 TPIDR_EL2, EL2 Read/Write Software Thread ID Register
+ pub const tpidr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.150 TPIDR_EL3, EL3 Read/Write Software Thread ID Register
+ pub const tpidr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b010 };
+ /// D19.2.151 TPIDRRO_EL0, EL0 Read-Only Software Thread ID Register
+ pub const tpidrro_el0: System = .{ .op0 = 0b11, .op1 = 0b011, .CRn = 0b1101, .CRm = 0b0000, .op2 = 0b011 };
+ /// Deprecated misnomer for `tpidrro_el0` (this encoding is S3_3_C13_C0_3,
+ /// i.e. TPIDRRO_EL0, not an EL3 register); kept for backward compatibility.
+ pub const tpidrro_el3: System = tpidrro_el0;
+ /// D19.2.152 TTBR0_EL1, Translation Table Base Register 0 (EL1)
+ pub const ttbr0_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.152 TTBR0_EL12, Translation Table Base Register 0 (EL12)
+ pub const ttbr0_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.153 TTBR0_EL2, Translation Table Base Register 0 (EL2)
+ pub const ttbr0_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.154 TTBR0_EL3, Translation Table Base Register 0 (EL3)
+ pub const ttbr0_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.155 TTBR1_EL1, Translation Table Base Register 1 (EL1)
+ pub const ttbr1_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.155 TTBR1_EL12, Translation Table Base Register 1 (EL12)
+ pub const ttbr1_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.156 TTBR1_EL2, Translation Table Base Register 1 (EL2)
+ pub const ttbr1_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0000, .op2 = 0b001 };
+ /// D19.2.157 VBAR_EL1, Vector Base Address Register (EL1)
+ pub const vbar_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.157 VBAR_EL12, Vector Base Address Register (EL12)
+ pub const vbar_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.158 VBAR_EL2, Vector Base Address Register (EL2)
+ pub const vbar_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.159 VBAR_EL3, Vector Base Address Register (EL3)
+ pub const vbar_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b1100, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.160 VMPIDR_EL2, Virtualization Multiprocessor ID Register
+ pub const vmpidr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b101 };
+ /// D19.2.161 VNCR_EL2, Virtual Nested Control Register
+ pub const vncr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0010, .op2 = 0b000 };
+ /// Deprecated transposed spelling of `vncr_el2`; kept for backward compatibility.
+ pub const nvcr_el2: System = vncr_el2;
+ /// D19.2.162 VPIDR_EL2, Virtualization Processor ID Register
+ pub const vpidr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0000, .CRm = 0b0000, .op2 = 0b000 };
+ /// D19.2.163 VSTCR_EL2, Virtualization Secure Translation Control Register
+ pub const vstcr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0110, .op2 = 0b010 };
+ /// D19.2.164 VSTTBR_EL2, Virtualization Secure Translation Table Base Register
+ pub const vsttbr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0110, .op2 = 0b000 };
+ /// D19.2.165 VTCR_EL2, Virtualization Translation Control Register
+ pub const vtcr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0001, .op2 = 0b010 };
+ /// D19.2.166 VTTBR_EL2, Virtualization Translation Table Base Register
+ pub const vttbr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0010, .CRm = 0b0001, .op2 = 0b000 };
+ /// D19.2.167 ZCR_EL1, SVE Control Register (EL1)
+ pub const zcr_el1: System = .{ .op0 = 0b11, .op1 = 0b000, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.167 ZCR_EL12, SVE Control Register (EL12)
+ pub const zcr_el12: System = .{ .op0 = 0b11, .op1 = 0b101, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.168 ZCR_EL2, SVE Control Register (EL2)
+ pub const zcr_el2: System = .{ .op0 = 0b11, .op1 = 0b100, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b000 };
+ /// D19.2.169 ZCR_EL3, SVE Control Register (EL3)
+ pub const zcr_el3: System = .{ .op0 = 0b11, .op1 = 0b110, .CRn = 0b0001, .CRm = 0b0010, .op2 = 0b000 };
+
+ /// Parses a system register name (case-insensitive) into its encoding.
+ /// Accepts either the generic encoded spelling "s<op0>_<op1>_c<CRn>_c<CRm>_<op2>"
+ /// with op0 restricted to 2 or 3, or any register name declared in this
+ /// struct. Returns null if `reg` matches neither form.
+ pub fn parse(reg: []const u8) ?System {
+ // "s2_0_c0_c0_0" is the shortest valid encoded spelling; the length check
+ // is a cheap pre-filter. Any failure below breaks out of the labeled block
+ // and falls through to the named-register lookup.
+ if (reg.len >= 10 and std.ascii.toLower(reg[0]) == 's') encoded: {
+ var symbol_it = std.mem.splitScalar(u8, reg[1..], '_');
+ const op0 = std.fmt.parseInt(u2, symbol_it.next() orelse break :encoded, 10) catch break :encoded;
+ // Only op0 values 2 and 3 name S<op0>_... system registers.
+ if (op0 < 0b10) break :encoded;
+ const op1 = std.fmt.parseInt(u3, symbol_it.next() orelse break :encoded, 10) catch break :encoded;
+ // The CRn and CRm segments must be spelled "c<n>", e.g. "c13".
+ const n = symbol_it.next() orelse break :encoded;
+ if (n.len == 0 or std.ascii.toLower(n[0]) != 'c') break :encoded;
+ const CRn = std.fmt.parseInt(u4, n[1..], 10) catch break :encoded;
+ const m = symbol_it.next() orelse break :encoded;
+ if (m.len == 0 or std.ascii.toLower(m[0]) != 'c') break :encoded;
+ const CRm = std.fmt.parseInt(u4, m[1..], 10) catch break :encoded;
+ const op2 = std.fmt.parseInt(u3, symbol_it.next() orelse break :encoded, 10) catch break :encoded;
+ // Any trailing segment makes the spelling invalid.
+ if (symbol_it.next() != null) break :encoded;
+ return .{ .op0 = op0, .op1 = op1, .CRn = CRn, .CRm = CRm, .op2 = op2 };
+ }
+ // Fall back to a comptime-unrolled scan of the named constants above;
+ // non-`System` declarations (e.g. this function) are skipped.
+ inline for (@typeInfo(System).@"struct".decls) |decl| {
+ if (@TypeOf(@field(System, decl.name)) != System) continue;
+ if (toLowerEqlAssertLower(reg, decl.name)) return @field(System, decl.name);
+ }
+ return null;
+ }
+ };
+
+ /// Case-insensitive equality of `lhs` against `rhs`, where `rhs` is
+ /// required to already be entirely lowercase (checked in safe builds).
+ fn toLowerEqlAssertLower(lhs: []const u8, rhs: []const u8) bool {
+ if (lhs.len != rhs.len) return false;
+ var index: usize = 0;
+ while (index < rhs.len) : (index += 1) {
+ const expected = rhs[index];
+ assert(!std.ascii.isUpper(expected));
+ if (std.ascii.toLower(lhs[index]) != expected) return false;
+ }
+ return true;
+ }
+};
+
+/// C1.2.4 Condition code
+/// 4-bit condition encodings used by conditional branches, selects, and
+/// compares. Each even/odd pair of encodings is a condition and its
+/// logical complement (see `invert`).
+pub const ConditionCode = enum(u4) {
+ /// integer: Equal
+ /// floating-point: Equal
+ /// Z == 1
+ eq = 0b0000,
+ /// integer: Not equal
+ /// floating-point: Not equal or unordered
+ /// Z == 0
+ ne = 0b0001,
+ /// integer: Unsigned higher or same
+ /// floating-point: Greater than, equal, or unordered
+ /// C == 1
+ hs = 0b0010,
+ /// integer: Unsigned lower
+ /// floating-point: Less than
+ /// C == 0
+ lo = 0b0011,
+ /// integer: Minus, negative
+ /// floating-point: Less than
+ /// N == 1
+ mi = 0b0100,
+ /// integer: Plus, positive or zero
+ /// floating-point: Greater than, equal, or unordered
+ /// N == 0
+ pl = 0b0101,
+ /// integer: Overflow
+ /// floating-point: Unordered
+ /// V == 1
+ vs = 0b0110,
+ /// integer: No overflow
+ /// floating-point: Ordered
+ /// V == 0
+ vc = 0b0111,
+ /// integer: Unsigned higher
+ /// floating-point: Greater than, or unordered
+ /// C == 1 and Z == 0
+ hi = 0b1000,
+ /// integer: Unsigned lower or same
+ /// floating-point: Less than or equal
+ /// C == 0 or Z == 1
+ ls = 0b1001,
+ /// integer: Signed greater than or equal
+ /// floating-point: Greater than or equal
+ /// N == V
+ ge = 0b1010,
+ /// integer: Signed less than
+ /// floating-point: Less than, or unordered
+ /// N != V
+ lt = 0b1011,
+ /// integer: Signed greater than
+ /// floating-point: Greater than
+ /// Z == 0 and N == V
+ gt = 0b1100,
+ /// integer: Signed less than or equal
+ /// floating-point: Less than, equal, or unordered
+ /// Z == 1 or N != V
+ le = 0b1101,
+ /// integer: Always
+ /// floating-point: Always
+ /// true
+ al = 0b1110,
+ /// integer: Always
+ /// floating-point: Always
+ /// true
+ /// NOTE: despite the historical "never" mnemonic, A64 treats 0b1111
+ /// identically to `al`.
+ nv = 0b1111,
+ /// Carry set
+ /// Alias of `hs`.
+ /// C == 1
+ pub const cs: ConditionCode = .hs;
+ /// Carry clear
+ /// Alias of `lo`.
+ /// C == 0
+ pub const cc: ConditionCode = .lo;
+
+ /// Returns the logical complement of `cond` by flipping the lowest
+ /// encoding bit (eq<->ne, hs<->lo, ...). Note that `al` and `nv` map to
+ /// each other even though both execute unconditionally.
+ pub fn invert(cond: ConditionCode) ConditionCode {
+ return @enumFromInt(@intFromEnum(cond) ^ 0b0001);
+ }
+};
+
+/// C4.1 A64 instruction set encoding
+pub const Instruction = packed union {
+ group: Group,
+ reserved: Reserved,
+ sme: Sme,
+ sve: Sve,
+ data_processing_immediate: DataProcessingImmediate,
+ branch_exception_generating_system: BranchExceptionGeneratingSystem,
+ load_store: LoadStore,
+ data_processing_register: DataProcessingRegister,
+ data_processing_vector: DataProcessingVector,
+
+ /// Table C4-1 Main encoding table for the A64 instruction set
+ pub const Group = packed struct {
+ /// Bits [24:0]: payload interpreted by the selected group.
+ encoded0: u25,
+ /// Bits [28:25]: primary selector between instruction groups.
+ op1: u4,
+ /// Bits [30:29].
+ encoded29: u2,
+ /// Bit [31].
+ op0: u1,
+ };
+
+ /// C4.1.1 Reserved
+ /// Top-level Reserved group: only the UDF encoding is allocated here.
+ pub const Reserved = packed union {
+ group: @This().Group,
+ udf: Udf,
+
+ /// Table C4-2 Encoding table for the Reserved group
+ pub const Group = packed struct {
+ encoded0: u16,
+ /// Bits [24:16].
+ op1: u9,
+ decoded25: u4 = 0b0000,
+ /// Bits [30:29].
+ op0: u2,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C6.2.387 UDF
+ /// Permanently Undefined: bits [31:16] are all zero; imm16 is available
+ /// for software/debugger use.
+ pub const Udf = packed struct {
+ imm16: u16,
+ decoded16: u16 = 0b0000000000000000,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ udf: Udf,
+ };
+ /// Decodes a Reserved-group instruction: only op0 == 0b00 with op1 all
+ /// zero is allocated (UDF); every other encoding is unallocated.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op0) {
+ 0b00 => switch (inst.group.op1) {
+ 0b000000000 => .{ .udf = inst.udf },
+ else => .unallocated,
+ },
+ else => .unallocated,
+ };
+ }
+ };
+
+ /// C4.1.2 SME encodings
+ pub const Sme = packed union {
+ group: @This().Group,
+
+ /// Table C4-3 Encodings table for the SME encodings group
+ /// Fixed bits: [31] == 1 and [28:25] == 0b0000 select this group.
+ pub const Group = packed struct {
+ encoded0: u2,
+ /// Bits [4:2].
+ op2: u3,
+ encoded5: u5,
+ /// Bits [24:10].
+ op1: u15,
+ decoded25: u4 = 0b0000,
+ /// Bits [30:29].
+ op0: u2,
+ decoded31: u1 = 0b1,
+ };
+ };
+
/// C4.1.30 SVE encodings
/// Only the group-level selector fields are modeled; individual SVE
/// instructions are not decoded further here.
pub const Sve = packed union {
    group: @This().Group,

    /// Table C4-31 Encoding table for the SVE encodings group
    /// Bits 28:25 are fixed to 0b0010 for this group.
    pub const Group = packed struct {
        encoded0: u4,
        op2: u1,
        encoded5: u5,
        op1: u15,
        decoded25: u4 = 0b0010,
        op0: u3,
    };
};
+
/// C4.1.86 Data Processing -- Immediate
pub const DataProcessingImmediate = packed union {
    group: @This().Group,
    pc_relative_addressing: PcRelativeAddressing,
    add_subtract_immediate: AddSubtractImmediate,
    add_subtract_immediate_with_tags: AddSubtractImmediateWithTags,
    logical_immediate: LogicalImmediate,
    move_wide_immediate: MoveWideImmediate,
    bitfield: Bitfield,
    extract: Extract,

    /// Table C4-87 Encoding table for the Data Processing -- Immediate group
    /// `op0` (bits 25:23) selects the subgroup; bits 28:26 are fixed 0b100.
    pub const Group = packed struct {
        encoded0: u23,
        op0: u3,
        decoded26: u3 = 0b100,
        encoded29: u3,
    };

    /// PC-rel. addressing
    /// The 21-bit offset is split across `immhi` (bits 23:5) and `immlo`
    /// (bits 30:29); the full immediate is `immhi:immlo`, sign-extended.
    pub const PcRelativeAddressing = packed union {
        group: @This().Group,
        adr: Adr,
        adrp: Adrp,

        pub const Group = packed struct {
            Rd: Register.Encoded,
            immhi: i19,
            decoded24: u5 = 0b10000,
            immlo: u2,
            op: Op,
        };

        /// C6.2.10 ADR
        pub const Adr = packed struct {
            Rd: Register.Encoded,
            immhi: i19,
            decoded24: u5 = 0b10000,
            immlo: u2,
            op: Op = .adr,
        };

        /// C6.2.11 ADRP
        pub const Adrp = packed struct {
            Rd: Register.Encoded,
            immhi: i19,
            decoded24: u5 = 0b10000,
            immlo: u2,
            op: Op = .adrp,
        };

        pub const Op = enum(u1) {
            adr = 0b0,
            adrp = 0b1,
        };
    };

    /// Add/subtract (immediate)
    /// `sh` applies an optional LSL #12 to the 12-bit immediate; `S` selects
    /// the flag-setting form and `op` selects add vs. subtract.
    pub const AddSubtractImmediate = packed union {
        group: @This().Group,
        add: Add,
        adds: Adds,
        sub: Sub,
        subs: Subs,

        pub const Group = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm12: u12,
            sh: Shift,
            decoded23: u6 = 0b100010,
            S: bool,
            op: AddSubtractOp,
            sf: Register.IntegerSize,
        };

        /// C6.2.4 ADD (immediate)
        pub const Add = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm12: u12,
            sh: Shift,
            decoded23: u6 = 0b100010,
            S: bool = false,
            op: AddSubtractOp = .add,
            sf: Register.IntegerSize,
        };

        /// C6.2.8 ADDS (immediate)
        pub const Adds = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm12: u12,
            sh: Shift,
            decoded23: u6 = 0b100010,
            S: bool = true,
            op: AddSubtractOp = .add,
            sf: Register.IntegerSize,
        };

        /// C6.2.357 SUB (immediate)
        pub const Sub = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm12: u12,
            sh: Shift,
            decoded23: u6 = 0b100010,
            S: bool = false,
            op: AddSubtractOp = .sub,
            sf: Register.IntegerSize,
        };

        /// C6.2.363 SUBS (immediate)
        pub const Subs = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm12: u12,
            sh: Shift,
            decoded23: u6 = 0b100010,
            S: bool = true,
            op: AddSubtractOp = .sub,
            sf: Register.IntegerSize,
        };

        /// Left-shift amount applied to `imm12` (LSL #0 or LSL #12).
        pub const Shift = enum(u1) {
            @"0" = 0b0,
            @"12" = 0b1,
        };
    };

    /// Add/subtract (immediate, with tags)
    /// Group-level fields only (ADDG/SUBG are not decoded further here).
    pub const AddSubtractImmediateWithTags = packed union {
        group: @This().Group,

        pub const Group = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            uimm4: u4,
            op3: u2,
            uimm6: u6,
            o2: u1,
            decoded23: u6 = 0b100011,
            S: bool,
            op: AddSubtractOp,
            sf: Register.IntegerSize,
        };
    };

    /// Logical (immediate)
    /// The immediate is a `Bitmask` (N:immr:imms) encoding of a repeated
    /// bit pattern; see `Bitmask.decodeImmediate`.
    pub const LogicalImmediate = packed union {
        group: @This().Group,
        @"and": And,
        orr: Orr,
        eor: Eor,
        ands: Ands,

        pub const Group = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100100,
            opc: LogicalOpc,
            sf: Register.IntegerSize,
        };

        /// C6.2.12 AND (immediate)
        pub const And = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100100,
            opc: LogicalOpc = .@"and",
            sf: Register.IntegerSize,
        };

        /// C6.2.240 ORR (immediate)
        pub const Orr = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100100,
            opc: LogicalOpc = .orr,
            sf: Register.IntegerSize,
        };

        /// C6.2.119 EOR (immediate)
        pub const Eor = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100100,
            opc: LogicalOpc = .eor,
            sf: Register.IntegerSize,
        };

        /// C6.2.14 ANDS (immediate)
        pub const Ands = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100100,
            opc: LogicalOpc = .ands,
            sf: Register.IntegerSize,
        };

        pub const Decoded = union(enum) {
            unallocated,
            @"and": And,
            orr: Orr,
            eor: Eor,
            ands: Ands,
        };
        /// An encoding with an invalid bitmask immediate (per
        /// `Bitmask.validImmediate`) is unallocated; otherwise `opc`
        /// selects the operation.
        pub fn decode(inst: @This()) @This().Decoded {
            return if (!inst.group.imm.validImmediate(inst.group.sf))
                .unallocated
            else switch (inst.group.opc) {
                .@"and" => .{ .@"and" = inst.@"and" },
                .orr => .{ .orr = inst.orr },
                .eor => .{ .eor = inst.eor },
                .ands => .{ .ands = inst.ands },
            };
        }
    };

    /// Move wide (immediate)
    pub const MoveWideImmediate = packed union {
        group: @This().Group,
        movn: Movn,
        movz: Movz,
        movk: Movk,

        pub const Group = packed struct {
            Rd: Register.Encoded,
            imm16: u16,
            hw: Hw,
            decoded23: u6 = 0b100101,
            opc: Opc,
            sf: Register.IntegerSize,
        };

        /// C6.2.226 MOVN
        pub const Movn = packed struct {
            Rd: Register.Encoded,
            imm16: u16,
            hw: Hw,
            decoded23: u6 = 0b100101,
            opc: Opc = .movn,
            sf: Register.IntegerSize,
        };

        /// C6.2.227 MOVZ
        pub const Movz = packed struct {
            Rd: Register.Encoded,
            imm16: u16,
            hw: Hw,
            decoded23: u6 = 0b100101,
            opc: Opc = .movz,
            sf: Register.IntegerSize,
        };

        /// C6.2.225 MOVK
        pub const Movk = packed struct {
            Rd: Register.Encoded,
            imm16: u16,
            hw: Hw,
            decoded23: u6 = 0b100101,
            opc: Opc = .movk,
            sf: Register.IntegerSize,
        };

        /// Halfword placement: `imm16` is shifted left by 16 * hw.
        pub const Hw = enum(u2) {
            @"0" = 0b00,
            @"16" = 0b01,
            @"32" = 0b10,
            @"48" = 0b11,

            /// The shift amount in bits.
            pub fn int(hw: Hw) u6 {
                return switch (hw) {
                    .@"0" => 0,
                    .@"16" => 16,
                    .@"32" => 32,
                    .@"48" => 48,
                };
            }

            /// The minimum register size able to hold a halfword at this
            /// position: shifts of 32/48 require a 64-bit destination.
            pub fn sf(hw: Hw) Register.IntegerSize {
                return switch (hw) {
                    .@"0", .@"16" => .word,
                    .@"32", .@"48" => .doubleword,
                };
            }
        };

        /// `0b01` is unallocated, hence the non-exhaustive enum.
        pub const Opc = enum(u2) {
            movn = 0b00,
            movz = 0b10,
            movk = 0b11,
            _,
        };

        pub const Decoded = union(enum) {
            unallocated,
            movn: Movn,
            movz: Movz,
            movk: Movk,
        };
        /// A 32-bit destination with hw in {32, 48} is unallocated, as is
        /// the unnamed opc value 0b01.
        pub fn decode(inst: @This()) @This().Decoded {
            return if (inst.group.sf == .word and inst.group.hw.sf() == .doubleword)
                .unallocated
            else switch (inst.group.opc) {
                _ => .unallocated,
                .movn => .{ .movn = inst.movn },
                .movz => .{ .movz = inst.movz },
                .movk => .{ .movk = inst.movk },
            };
        }
    };

    /// Bitfield
    pub const Bitfield = packed union {
        group: @This().Group,
        sbfm: Sbfm,
        bfm: Bfm,
        ubfm: Ubfm,

        pub const Group = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100110,
            opc: Opc,
            sf: Register.IntegerSize,
        };

        pub const Sbfm = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100110,
            opc: Opc = .sbfm,
            sf: Register.IntegerSize,
        };

        pub const Bfm = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100110,
            opc: Opc = .bfm,
            sf: Register.IntegerSize,
        };

        pub const Ubfm = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imm: Bitmask,
            decoded23: u6 = 0b100110,
            opc: Opc = .ubfm,
            sf: Register.IntegerSize,
        };

        /// `0b11` is unallocated, hence the non-exhaustive enum.
        pub const Opc = enum(u2) {
            sbfm = 0b00,
            bfm = 0b01,
            ubfm = 0b10,
            _,
        };

        pub const Decoded = union(enum) {
            unallocated,
            sbfm: Sbfm,
            bfm: Bfm,
            ubfm: Ubfm,
        };
        /// An encoding whose immediate fails `Bitmask.validBitfield` (N not
        /// matching sf, or out-of-range immr/imms for 32-bit) is unallocated.
        pub fn decode(inst: @This()) @This().Decoded {
            return if (!inst.group.imm.validBitfield(inst.group.sf))
                .unallocated
            else switch (inst.group.opc) {
                _ => .unallocated,
                .sbfm => .{ .sbfm = inst.sbfm },
                .bfm => .{ .bfm = inst.bfm },
                .ubfm => .{ .ubfm = inst.ubfm },
            };
        }
    };

    /// Extract
    pub const Extract = packed union {
        group: @This().Group,
        extr: Extr,

        pub const Group = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imms: u6,
            Rm: Register.Encoded,
            o0: u1,
            N: Register.IntegerSize,
            decoded23: u6 = 0b100111,
            op21: u2,
            sf: Register.IntegerSize,
        };

        pub const Extr = packed struct {
            Rd: Register.Encoded,
            Rn: Register.Encoded,
            imms: u6,
            Rm: Register.Encoded,
            o0: u1 = 0b0,
            N: Register.IntegerSize,
            decoded23: u6 = 0b100111,
            op21: u2 = 0b00,
            sf: Register.IntegerSize,
        };

        pub const Decoded = union(enum) {
            unallocated,
            extr: Extr,
        };
        /// EXTR is the only allocated encoding: op21 and o0 must be zero,
        /// N must equal sf, and for 32-bit forms imms<5> must be clear
        /// (the rotate amount must be < 32).
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.op21) {
                0b01, 0b10...0b11 => .unallocated,
                0b00 => switch (inst.group.o0) {
                    0b1 => .unallocated,
                    0b0 => if ((inst.group.sf == .word and @as(u1, @truncate(inst.group.imms >> 5)) == 0b1) or
                        inst.group.sf != inst.group.N)
                        .unallocated
                    else
                        .{ .extr = inst.extr },
                },
            };
        }
    };

    /// The (N, immr, imms) triple used by logical-immediate and bitfield
    /// instructions; a Zig port of the shared pseudocode `DecodeBitMasks`.
    pub const Bitmask = packed struct {
        imms: u6,
        immr: u6,
        N: Register.IntegerSize,

        /// The manual's `immN:NOT(imms)` value; the position of its highest
        /// set bit determines the element size of the repeated pattern.
        fn lenHsb(bitmask: Bitmask) u7 {
            return @bitCast(packed struct {
                not_imms: u6,
                N: Register.IntegerSize,
            }{ .not_imms = ~bitmask.imms, .N = bitmask.N });
        }

        /// A logical immediate is valid iff `immN:NOT(imms)` has at least
        /// two bits set (`x & (x -% 1) != 0`): the highest set bit fixes the
        /// element size, and a second set bit rules out the reserved
        /// all-ones element. N must be 0 for 32-bit forms.
        fn validImmediate(bitmask: Bitmask, sf: Register.IntegerSize) bool {
            if (sf == .word and bitmask.N == .doubleword) return false;
            const len_hsb = bitmask.lenHsb();
            return (len_hsb -% 1) & len_hsb != 0b0_000000;
        }

        /// Bitfield constraints: N must equal sf, and for 32-bit forms both
        /// immr<5> and imms<5> must be clear (positions must be < 32).
        fn validBitfield(bitmask: Bitmask, sf: Register.IntegerSize) bool {
            if (sf != bitmask.N) return false;
            if (sf == .word and (@as(u1, @truncate(bitmask.immr >> 5)) != 0b0 or
                @as(u1, @truncate(bitmask.imms >> 5)) != 0b0)) return false;
            const len_hsb = bitmask.lenHsb();
            return len_hsb >= 0b0_000010;
        }

        /// Shared pseudocode `DecodeBitMasks`: returns `.{ wmask, tmask }`.
        /// Assumes validity was already checked by the caller.
        fn decode(bitmask: Bitmask, sf: Register.IntegerSize) struct { u64, u64 } {
            // esize = 2 ** HighestSetBit(immN:NOT(imms))
            const esize = @as(u7, 1 << 6) >> @clz(bitmask.lenHsb());
            const levels: u6 = @intCast(esize - 1);
            const s = bitmask.imms & levels;
            const r = bitmask.immr & levels;
            const d = (s -% r) & levels;
            // welem/telem: (s + 1) resp. (d + 1) low bits set.
            const welem = @as(u64, std.math.maxInt(u64)) >> (63 - s);
            const telem = @as(u64, std.math.maxInt(u64)) >> (63 - d);
            const emask = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - esize);
            // Multiplying by rmask replicates an esize-bit element across 64 bits.
            const rmask = @divExact(std.math.maxInt(u64), emask);
            const wmask = std.math.rotr(u64, welem * rmask, r);
            const tmask = telem * rmask;
            return switch (sf) {
                .word => .{ @as(u32, @truncate(wmask)), @as(u32, @truncate(tmask)) },
                .doubleword => .{ wmask, tmask },
            };
        }

        /// The materialized logical-immediate value (wmask only).
        pub fn decodeImmediate(bitmask: Bitmask, sf: Register.IntegerSize) u64 {
            assert(bitmask.validImmediate(sf));
            const imm, _ = bitmask.decode(sf);
            return imm;
        }

        /// Both masks, for bitfield-move semantics.
        pub fn decodeBitfield(bitmask: Bitmask, sf: Register.IntegerSize) struct { u64, u64 } {
            assert(bitmask.validBitfield(sf));
            return bitmask.decode(sf);
        }

        /// Shared pseudocode `MoveWidePreferred`: true when this immediate
        /// would be preferably assembled as a MOVZ/MOVN alias instead of a
        /// logical immediate.
        pub fn moveWidePreferred(bitmask: Bitmask, sf: Register.IntegerSize) bool {
            const s = bitmask.imms;
            const r = bitmask.immr;
            const width: u7 = switch (sf) {
                .word => 32,
                .doubleword => 64,
            };
            if (sf != bitmask.N) return false;
            if (sf == .word and @as(u1, @truncate(s >> 5)) != 0b0) return false;
            // A small run of ones that fits in a single halfword => MOVZ.
            if (s < 16) return (-%r % 16) <= (15 - s);
            // A small run of zeros that fits in a single halfword => MOVN.
            if (s >= width - 15) return (r % 16) <= (s - (width - 15));
            return false;
        }
    };

    pub const Decoded = union(enum) {
        unallocated,
        pc_relative_addressing: PcRelativeAddressing,
        add_subtract_immediate: AddSubtractImmediate,
        add_subtract_immediate_with_tags: AddSubtractImmediateWithTags,
        logical_immediate: LogicalImmediate,
        move_wide_immediate: MoveWideImmediate,
        bitfield: Bitfield,
        extract: Extract,
    };
    /// Dispatch per Table C4-87: op0 (bits 25:23) selects the subgroup.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.op0) {
            0b000, 0b001 => .{ .pc_relative_addressing = inst.pc_relative_addressing },
            0b010 => .{ .add_subtract_immediate = inst.add_subtract_immediate },
            0b011 => .{ .add_subtract_immediate_with_tags = inst.add_subtract_immediate_with_tags },
            0b100 => .{ .logical_immediate = inst.logical_immediate },
            0b101 => .{ .move_wide_immediate = inst.move_wide_immediate },
            0b110 => .{ .bitfield = inst.bitfield },
            0b111 => .{ .extract = inst.extract },
        };
    }
};
+
/// C4.1.87 Branches, Exception Generating and System instructions
pub const BranchExceptionGeneratingSystem = packed union {
    group: @This().Group,
    conditional_branch_immediate: ConditionalBranchImmediate,
    exception_generating: ExceptionGenerating,
    system_register_argument: SystemRegisterArgument,
    hints: Hints,
    barriers: Barriers,
    pstate: Pstate,
    system_result: SystemResult,
    system: System,
    system_register_move: SystemRegisterMove,
    unconditional_branch_register: UnconditionalBranchRegister,
    unconditional_branch_immediate: UnconditionalBranchImmediate,
    compare_branch_immediate: CompareBranchImmediate,
    test_branch_immediate: TestBranchImmediate,

    /// Table C4-88 Encoding table for the Branches, Exception Generating and System instructions group
    /// Least-significant-bit first: op0 is bits 31:29, op1 bits 25:12,
    /// op2 bits 4:0.
    pub const Group = packed struct {
        op2: u5,
        encoded5: u7,
        op1: u14,
        decoded26: u3 = 0b101,
        op0: u3,
    };

    /// Conditional branch (immediate)
    pub const ConditionalBranchImmediate = packed union {
        group: @This().Group,
        b: B,
        bc: Bc,

        pub const Group = packed struct {
            cond: ConditionCode,
            o0: u1,
            imm19: i19,
            o1: u1,
            decoded25: u7 = 0b0101010,
        };

        /// C6.2.26 B.cond
        pub const B = packed struct {
            cond: ConditionCode,
            o0: u1 = 0b0,
            imm19: i19,
            o1: u1 = 0b0,
            decoded25: u7 = 0b0101010,
        };

        /// C6.2.27 BC.cond
        pub const Bc = packed struct {
            cond: ConditionCode,
            o0: u1 = 0b1,
            imm19: i19,
            o1: u1 = 0b0,
            decoded25: u7 = 0b0101010,
        };

        pub const Decoded = union(enum) {
            unallocated,
            b: B,
            bc: Bc,
        };
        /// o1 must be 0; o0 then distinguishes B.cond from BC.cond.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.o1) {
                0b0 => switch (inst.group.o0) {
                    0b0 => .{ .b = inst.b },
                    0b1 => .{ .bc = inst.bc },
                },
                0b1 => .unallocated,
            };
        }
    };

    /// Exception generating
    pub const ExceptionGenerating = packed union {
        group: @This().Group,
        svc: Svc,
        hvc: Hvc,
        smc: Smc,
        brk: Brk,
        hlt: Hlt,
        tcancel: Tcancel,
        dcps1: Dcps1,
        dcps2: Dcps2,
        dcps3: Dcps3,

        pub const Group = packed struct {
            LL: u2,
            op2: u3,
            imm16: u16,
            opc: u3,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.365 SVC
        pub const Svc = packed struct {
            decoded0: u2 = 0b01,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b000,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.128 HVC
        pub const Hvc = packed struct {
            decoded0: u2 = 0b10,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b000,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.283 SMC
        pub const Smc = packed struct {
            decoded0: u2 = 0b11,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b000,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.40 BRK
        pub const Brk = packed struct {
            decoded0: u2 = 0b00,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b001,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.127 HLT
        pub const Hlt = packed struct {
            decoded0: u2 = 0b00,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b010,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.376 TCANCEL
        pub const Tcancel = packed struct {
            decoded0: u2 = 0b00,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b011,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.110 DCPS1
        pub const Dcps1 = packed struct {
            LL: u2 = 0b01,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b101,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.110 DCPS2
        pub const Dcps2 = packed struct {
            LL: u2 = 0b10,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b101,
            decoded24: u8 = 0b11010100,
        };

        /// C6.2.110 DCPS3
        pub const Dcps3 = packed struct {
            LL: u2 = 0b11,
            decoded2: u3 = 0b000,
            imm16: u16,
            decoded21: u3 = 0b101,
            decoded24: u8 = 0b11010100,
        };

        pub const Decoded = union(enum) {
            unallocated,
            svc: Svc,
            hvc: Hvc,
            smc: Smc,
            brk: Brk,
            hlt: Hlt,
            tcancel: Tcancel,
            dcps1: Dcps1,
            dcps2: Dcps2,
            dcps3: Dcps3,
        };
        /// op2 must be 000; opc then selects the instruction family and LL
        /// the exact instruction (or marks the encoding unallocated).
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.op2) {
                0b001 => .unallocated,
                0b010...0b011 => .unallocated,
                0b100...0b111 => .unallocated,
                0b000 => switch (inst.group.opc) {
                    0b000 => switch (inst.group.LL) {
                        0b00 => .unallocated,
                        0b01 => .{ .svc = inst.svc },
                        0b10 => .{ .hvc = inst.hvc },
                        0b11 => .{ .smc = inst.smc },
                    },
                    0b001 => switch (inst.group.LL) {
                        0b01 => .unallocated,
                        0b00 => .{ .brk = inst.brk },
                        0b10...0b11 => .unallocated,
                    },
                    0b010 => switch (inst.group.LL) {
                        0b01 => .unallocated,
                        0b00 => .{ .hlt = inst.hlt },
                        0b10...0b11 => .unallocated,
                    },
                    0b011 => switch (inst.group.LL) {
                        0b00 => .{ .tcancel = inst.tcancel },
                        0b01 => .unallocated,
                        0b10...0b11 => .unallocated,
                    },
                    0b100 => .unallocated,
                    0b101 => switch (inst.group.LL) {
                        0b00 => .unallocated,
                        0b01 => .{ .dcps1 = inst.dcps1 },
                        0b10 => .{ .dcps2 = inst.dcps2 },
                        0b11 => .{ .dcps3 = inst.dcps3 },
                    },
                    0b110 => .unallocated,
                    0b111 => .unallocated,
                },
            };
        }
    };

    /// System instructions with register argument
    /// Group-level fields only; individual instructions are selected by
    /// CRm/op2 and are not decoded further here.
    pub const SystemRegisterArgument = packed struct {
        Rt: Register.Encoded,
        op2: u3,
        CRm: u4,
        decoded12: u20 = 0b11010101000000110001,
    };

    /// Hints
    pub const Hints = packed union {
        group: @This().Group,
        hint: Hint,
        nop: Nop,
        yield: Yield,
        wfe: Wfe,
        wfi: Wfi,
        sev: Sev,
        sevl: Sevl,

        pub const Group = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3,
            CRm: u4,
            decoded12: u20 = 0b11010101000000110010,
        };

        /// C6.2.126 HINT
        pub const Hint = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3,
            CRm: u4,
            decoded12: u4 = 0b0010,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.238 NOP
        pub const Nop = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3 = 0b000,
            CRm: u4 = 0b0000,
            decoded12: u4 = 0b0010,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.402 YIELD
        pub const Yield = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3 = 0b001,
            CRm: u4 = 0b0000,
            decoded12: u4 = 0b0010,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.396 WFE
        pub const Wfe = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3 = 0b010,
            CRm: u4 = 0b0000,
            decoded12: u4 = 0b0010,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.398 WFI
        pub const Wfi = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3 = 0b011,
            CRm: u4 = 0b0000,
            decoded12: u4 = 0b0010,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.280 SEV
        pub const Sev = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3 = 0b100,
            CRm: u4 = 0b0000,
            decoded12: u4 = 0b0010,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.280 SEVL
        pub const Sevl = packed struct {
            decoded0: u5 = 0b11111,
            op2: u3 = 0b101,
            CRm: u4 = 0b0000,
            decoded12: u4 = 0b0010,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        pub const Decoded = union(enum) {
            hint: Hint,
            nop: Nop,
            yield: Yield,
            wfe: Wfe,
            wfi: Wfi,
            sev: Sev,
            sevl: Sevl,
        };
        /// Any CRm:op2 combination not named below is still a valid generic
        /// HINT (hints never become unallocated), so the `else` arms fall
        /// back to `.hint`.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.CRm) {
                else => .{ .hint = inst.hint },
                0b0000 => switch (inst.group.op2) {
                    else => .{ .hint = inst.hint },
                    0b000 => .{ .nop = inst.nop },
                    0b001 => .{ .yield = inst.yield },
                    0b010 => .{ .wfe = inst.wfe },
                    0b011 => .{ .wfi = inst.wfi },
                    0b100 => .{ .sev = inst.sev },
                    0b101 => .{ .sevl = inst.sevl },
                },
            };
        }
    };

    /// Barriers
    /// NOTE(review): unlike the sibling groups, no `decode` helper is
    /// provided here; callers discriminate via `group.op2`/`CRm` directly.
    pub const Barriers = packed union {
        group: @This().Group,
        clrex: Clrex,
        dsb: Dsb,
        dmb: Dmb,
        isb: Isb,
        sb: Sb,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            op2: u3,
            CRm: u4,
            decoded12: u4 = 0b0011,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.56 CLREX
        pub const Clrex = packed struct {
            Rt: Register.Encoded = @enumFromInt(0b11111),
            op2: u3 = 0b010,
            CRm: u4,
            decoded12: u4 = 0b0011,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.116 DSB
        pub const Dsb = packed struct {
            Rt: Register.Encoded = @enumFromInt(0b11111),
            opc: u2 = 0b00,
            decoded7: u1 = 0b1,
            CRm: Option,
            decoded12: u4 = 0b0011,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.114 DMB
        pub const Dmb = packed struct {
            Rt: Register.Encoded = @enumFromInt(0b11111),
            opc: u2 = 0b01,
            decoded7: u1 = 0b1,
            CRm: Option,
            decoded12: u4 = 0b0011,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.131 ISB
        pub const Isb = packed struct {
            Rt: Register.Encoded = @enumFromInt(0b11111),
            opc: u2 = 0b10,
            decoded7: u1 = 0b1,
            CRm: Option,
            decoded12: u4 = 0b0011,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.264 SB
        pub const Sb = packed struct {
            Rt: Register.Encoded = @enumFromInt(0b11111),
            opc: u2 = 0b11,
            decoded7: u1 = 0b1,
            CRm: u4 = 0b0000,
            decoded12: u4 = 0b0011,
            decoded16: u3 = 0b011,
            decoded19: u2 = 0b00,
            decoded21: u1 = 0b0,
            decoded22: u10 = 0b1101010100,
        };

        /// Barrier scope/domain options for DMB/DSB; the non-exhaustive tail
        /// keeps the remaining 4-bit patterns representable.
        pub const Option = enum(u4) {
            oshld = 0b0001,
            oshst = 0b0010,
            osh = 0b0011,
            nshld = 0b0101,
            nshst = 0b0110,
            nsh = 0b0111,
            ishld = 0b1001,
            ishst = 0b1010,
            ish = 0b1011,
            ld = 0b1101,
            st = 0b1110,
            sy = 0b1111,
            _,
        };
    };

    /// PSTATE
    /// Group-level fields only (MSR (immediate) and friends).
    pub const Pstate = packed struct {
        Rt: Register.Encoded,
        op2: u3,
        CRm: u4,
        decoded12: u4 = 0b0100,
        op1: u3,
        decoded19: u13 = 0b1101010100000,
    };

    /// System with result
    pub const SystemResult = packed struct {
        Rt: Register.Encoded,
        op2: u3,
        CRm: u4,
        CRn: u4,
        op1: u3,
        decoded19: u13 = 0b1101010100100,
    };

    /// System instructions
    pub const System = packed union {
        group: @This().Group,
        sys: Sys,
        sysl: Sysl,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            op2: u3,
            CRm: u4,
            CRn: u4,
            op1: u3,
            decoded19: u2 = 0b01,
            L: L,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.372 SYS
        pub const Sys = packed struct {
            Rt: Register.Encoded,
            op2: u3,
            CRm: u4,
            CRn: u4,
            op1: u3,
            decoded19: u2 = 0b01,
            L: L = .sys,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.373 SYSL
        pub const Sysl = packed struct {
            Rt: Register.Encoded,
            op2: u3,
            CRm: u4,
            CRn: u4,
            op1: u3,
            decoded19: u2 = 0b01,
            L: L = .sysl,
            decoded22: u10 = 0b1101010100,
        };

        /// The load/direction bit (bit 21).
        const L = enum(u1) {
            sys = 0b0,
            sysl = 0b1,
        };

        pub const Decoded = union(enum) {
            sys: Sys,
            sysl: Sysl,
        };
        /// Both values of L are allocated; no unallocated case exists.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.L) {
                .sys => .{ .sys = inst.sys },
                .sysl => .{ .sysl = inst.sysl },
            };
        }
    };

    /// System register move
    pub const SystemRegisterMove = packed union {
        group: @This().Group,
        msr: Msr,
        mrs: Mrs,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            systemreg: Register.System,
            L: L,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.230 MSR (register)
        pub const Msr = packed struct {
            Rt: Register.Encoded,
            systemreg: Register.System,
            L: L = .msr,
            decoded22: u10 = 0b1101010100,
        };

        /// C6.2.228 MRS
        pub const Mrs = packed struct {
            Rt: Register.Encoded,
            systemreg: Register.System,
            L: L = .mrs,
            decoded22: u10 = 0b1101010100,
        };

        /// The load bit: 0 writes the system register (MSR), 1 reads (MRS).
        pub const L = enum(u1) {
            msr = 0b0,
            mrs = 0b1,
        };

        pub const Decoded = union(enum) {
            msr: Msr,
            mrs: Mrs,
        };
        /// Both values of L are allocated; no unallocated case exists.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.L) {
                .msr => .{ .msr = inst.msr },
                .mrs => .{ .mrs = inst.mrs },
            };
        }
    };

    /// Unconditional branch (register)
    pub const UnconditionalBranchRegister = packed union {
        group: @This().Group,
        br: Br,
        blr: Blr,
        ret: Ret,

        pub const Group = packed struct {
            op4: u5,
            Rn: Register.Encoded,
            op3: u6,
            op2: u5,
            opc: u4,
            decoded25: u7 = 0b1101011,
        };

        /// C6.2.37 BR
        pub const Br = packed struct {
            Rm: Register.Encoded = @enumFromInt(0),
            Rn: Register.Encoded,
            M: bool = false,
            A: bool = false,
            decoded12: u4 = 0b0000,
            decoded16: u5 = 0b11111,
            op: u2 = 0b00,
            decoded23: u1 = 0b0,
            Z: bool = false,
            decoded25: u7 = 0b1101011,
        };

        /// C6.2.35 BLR
        pub const Blr = packed struct {
            Rm: Register.Encoded = @enumFromInt(0),
            Rn: Register.Encoded,
            M: bool = false,
            A: bool = false,
            decoded12: u4 = 0b0000,
            decoded16: u5 = 0b11111,
            op: u2 = 0b01,
            decoded23: u1 = 0b0,
            Z: bool = false,
            decoded25: u7 = 0b1101011,
        };

        /// C6.2.254 RET
        pub const Ret = packed struct {
            Rm: Register.Encoded = @enumFromInt(0),
            Rn: Register.Encoded,
            M: bool = false,
            A: bool = false,
            decoded12: u4 = 0b0000,
            decoded16: u5 = 0b11111,
            op: u2 = 0b10,
            decoded23: u1 = 0b0,
            Z: bool = false,
            decoded25: u7 = 0b1101011,
        };

        pub const Decoded = union(enum) {
            unallocated,
            br: Br,
            blr: Blr,
            ret: Ret,
        };
        /// NOTE(review): `op3` (bits 15:10, i.e. the A/M pointer-auth flags)
        /// is not examined here, so e.g. BRAAZ/BLRAAZ/RETAA encodings decode
        /// as plain br/blr/ret; the A/M bits are still preserved in the
        /// returned structs. Confirm whether callers rely on those flags or
        /// whether nonzero op3 should instead map to `.unallocated`.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.op2) {
                else => .unallocated,
                0b11111 => switch (inst.group.opc) {
                    0b0000 => switch (inst.group.op4) {
                        else => .unallocated,
                        0b00000 => .{ .br = inst.br },
                    },
                    0b0001 => switch (inst.group.op4) {
                        else => .unallocated,
                        0b00000 => .{ .blr = inst.blr },
                    },
                    0b0010 => switch (inst.group.op4) {
                        else => .unallocated,
                        0b00000 => .{ .ret = inst.ret },
                    },
                    else => .unallocated,
                },
            };
        }
    };

    /// Unconditional branch (immediate)
    pub const UnconditionalBranchImmediate = packed union {
        group: @This().Group,
        b: B,
        bl: Bl,

        pub const Group = packed struct {
            imm26: i26,
            decoded26: u5 = 0b00101,
            op: Op,
        };

        /// C6.2.25 B
        pub const B = packed struct {
            imm26: i26,
            decoded26: u5 = 0b00101,
            op: Op = .b,
        };

        /// C6.2.34 BL
        pub const Bl = packed struct {
            imm26: i26,
            decoded26: u5 = 0b00101,
            op: Op = .bl,
        };

        /// Bit 31: 0 = branch, 1 = branch with link.
        pub const Op = enum(u1) {
            b = 0b0,
            bl = 0b1,
        };
    };

    /// Compare and branch (immediate)
    pub const CompareBranchImmediate = packed union {
        group: @This().Group,
        cbz: Cbz,
        cbnz: Cbnz,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            op: Op,
            decoded25: u6 = 0b011010,
            sf: Register.IntegerSize,
        };

        /// C6.2.47 CBZ
        pub const Cbz = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            op: Op = .cbz,
            decoded25: u6 = 0b011010,
            sf: Register.IntegerSize,
        };

        /// C6.2.46 CBNZ
        pub const Cbnz = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            op: Op = .cbnz,
            decoded25: u6 = 0b011010,
            sf: Register.IntegerSize,
        };

        pub const Op = enum(u1) {
            cbz = 0b0,
            cbnz = 0b1,
        };
    };

    /// Test and branch (immediate)
    /// The tested bit number is `b5:b40`; b5 doubles as the register-size
    /// bit (bit 31).
    pub const TestBranchImmediate = packed union {
        group: @This().Group,
        tbz: Tbz,
        tbnz: Tbnz,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            imm14: i14,
            b40: u5,
            op: Op,
            decoded25: u6 = 0b011011,
            b5: u1,
        };

        /// C6.2.375 TBZ
        pub const Tbz = packed struct {
            Rt: Register.Encoded,
            imm14: i14,
            b40: u5,
            op: Op = .tbz,
            decoded25: u6 = 0b011011,
            b5: u1,
        };

        /// C6.2.374 TBNZ
        pub const Tbnz = packed struct {
            Rt: Register.Encoded,
            imm14: i14,
            b40: u5,
            op: Op = .tbnz,
            decoded25: u6 = 0b011011,
            b5: u1,
        };

        pub const Op = enum(u1) {
            tbz = 0b0,
            tbnz = 0b1,
        };
    };

    pub const Decoded = union(enum) {
        unallocated,
        conditional_branch_immediate: ConditionalBranchImmediate,
        exception_generating: ExceptionGenerating,
        system_register_argument: SystemRegisterArgument,
        hints: Hints,
        barriers: Barriers,
        pstate: Pstate,
        system_result: SystemResult,
        system: System,
        system_register_move: SystemRegisterMove,
        unconditional_branch_register: UnconditionalBranchRegister,
        unconditional_branch_immediate: UnconditionalBranchImmediate,
        compare_branch_immediate: CompareBranchImmediate,
        test_branch_immediate: TestBranchImmediate,
    };
    /// Dispatch per Table C4-88 on op0 (bits 31:29), then on the op1 ranges
    /// from the table. The literal range bounds below spell out the table's
    /// bit patterns for the 14-bit op1 field.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.op0) {
            0b010 => switch (inst.group.op1) {
                0b000000000000000...0b01111111111111 => .{ .conditional_branch_immediate = inst.conditional_branch_immediate },
                else => .unallocated,
            },
            0b110 => switch (inst.group.op1) {
                0b00000000000000...0b00111111111111 => .{ .exception_generating = inst.exception_generating },
                0b01000000110001 => .{ .system_register_argument = inst.system_register_argument },
                0b01000000110010 => switch (inst.group.op2) {
                    0b11111 => .{ .hints = inst.hints },
                    else => .unallocated,
                },
                0b01000000110011 => .{ .barriers = inst.barriers },
                0b01000000000100,
                0b01000000010100,
                0b01000000100100,
                0b01000000110100,
                0b01000001000100,
                0b01000001010100,
                0b01000001100100,
                0b01000001110100,
                => .{ .pstate = inst.pstate },
                0b01001000000000...0b01001001111111 => .{ .system_result = inst.system_result },
                0b01000010000000...0b01000011111111, 0b01001010000000...0b01001011111111 => .{ .system = inst.system },
                0b01000100000000...0b01000111111111, 0b01001100000000...0b01001111111111 => .{ .system_register_move = inst.system_register_move },
                0b10000000000000...0b11111111111111 => .{ .unconditional_branch_register = inst.unconditional_branch_register },
                else => .unallocated,
            },
            0b000, 0b100 => .{ .unconditional_branch_immediate = inst.unconditional_branch_immediate },
            0b001, 0b101 => switch (inst.group.op1) {
                0b00000000000000...0b01111111111111 => .{ .compare_branch_immediate = inst.compare_branch_immediate },
                0b10000000000000...0b11111111111111 => .{ .test_branch_immediate = inst.test_branch_immediate },
            },
            else => .unallocated,
        };
    }
};
+
+ /// C4.1.88 Loads and Stores
+ pub const LoadStore = packed union {
+ group: @This().Group,
+ register_literal: RegisterLiteral,
+ memory: Memory,
+ no_allocate_pair_offset: NoAllocatePairOffset,
+ register_pair_post_indexed: RegisterPairPostIndexed,
+ register_pair_offset: RegisterPairOffset,
+ register_pair_pre_indexed: RegisterPairPreIndexed,
+ register_unscaled_immediate: RegisterUnscaledImmediate,
+ register_immediate_post_indexed: RegisterImmediatePostIndexed,
+ register_unprivileged: RegisterUnprivileged,
+ register_immediate_pre_indexed: RegisterImmediatePreIndexed,
+ register_register_offset: RegisterRegisterOffset,
+ register_unsigned_immediate: RegisterUnsignedImmediate,
+
/// Table C4-89 Encoding table for the Loads and Stores group
/// Least-significant-bit first: op0 is bits 31:28, op1 bit 26, op2 bits
/// 24:23, op3 bits 21:16, op4 bits 11:10.
pub const Group = packed struct {
    encoded0: u10,
    op4: u2,
    encoded12: u4,
    op3: u6,
    encoded22: u1,
    op2: u2,
    decoded25: u1 = 0b0,
    op1: bool,
    decoded27: u1 = 0b1,
    op0: u4,
};
+
/// Load register (literal)
/// PC-relative loads; `V` selects the integer vs. SIMD&FP variants.
pub const RegisterLiteral = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    pub const Group = packed struct {
        Rt: Register.Encoded,
        imm19: i19,
        decoded24: u2 = 0b00,
        V: bool,
        decoded27: u3 = 0b011,
        opc: u2,
    };

    pub const Integer = packed union {
        group: @This().Group,
        ldr: Ldr,
        ldrsw: Ldrsw,
        prfm: Prfm,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b011,
            opc: u2,
        };

        /// C6.2.167 LDR (literal)
        /// Splits `opc` into sf (bit 30: 32- vs. 64-bit) and a fixed high
        /// bit, covering opc values 0b00 and 0b01.
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b011,
            sf: Register.IntegerSize,
            opc1: u1 = 0b0,
        };

        /// C6.2.179 LDRSW (literal)
        pub const Ldrsw = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b011,
            opc: u2 = 0b10,
        };

        /// C6.2.248 PRFM (literal)
        /// Rt is repurposed as the prefetch operation specifier.
        pub const Prfm = packed struct {
            prfop: PrfOp,
            imm19: i19,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b011,
            opc: u2 = 0b11,
        };
    };

    pub const Vector = packed union {
        group: @This().Group,
        ldr: Ldr,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b011,
            opc: VectorSize,
        };

        /// C7.2.192 LDR (literal, SIMD&FP)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            imm19: i19,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b011,
            opc: VectorSize,
        };
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Both values of V are allocated at this level; finer decoding (opc)
    /// is left to the sub-unions.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Memory Copy and Memory Set
///
/// Raw bit layout only; `op1`, `op2`, and `o0` are the selector fields and
/// the `decoded*` fields carry this subgroup's fixed bits.
pub const Memory = packed struct {
    Rd: Register.Encoded,
    Rn: Register.Encoded,
    decoded10: u2 = 0b01,
    op2: u4,
    Rs: Register.Encoded,
    decoded21: u1 = 0b0,
    op1: u2,
    decoded24: u2 = 0b01,
    o0: u1,
    decoded27: u3 = 0b011,
    size: IntegerSize,
};
+
/// Load/store no-allocate pair (offset)
///
/// Raw bit layout only; `L` distinguishes store from load and `V` integer
/// from SIMD&FP, with `decoded23`/`decoded27` carrying this subgroup's
/// fixed bits.
pub const NoAllocatePairOffset = packed struct {
    Rt: Register.Encoded,
    Rn: Register.Encoded,
    Rt2: Register.Encoded,
    imm7: i7,
    L: L,
    decoded23: u3 = 0b000,
    V: bool,
    decoded27: u3 = 0b101,
    opc: u2,
};
+
/// Load/store register pair (post-indexed)
///
/// A `packed union` overlaying the shared `Group` bit layout with the
/// integer and SIMD&FP views of the same 32-bit instruction word.
pub const RegisterPairPostIndexed = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Shared bit layout; `V` selects the integer (`false`) or
    /// SIMD&FP (`true`) view (see `decode`).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        Rt2: Register.Encoded,
        imm7: i7,
        L: L,
        decoded23: u3 = 0b001,
        V: bool,
        decoded27: u3 = 0b101,
        opc: u2,
    };

    pub const Integer = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,
        ldpsw: Ldpsw,

        /// Shared layout of the integer forms (`V` fixed to `false`).
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b001,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2,
        };

        /// C6.2.321 STP
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b001,
            V: bool = false,
            decoded27: u3 = 0b101,
            // `opc0` + `sf` together overlay `Group.opc`.
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.164 LDP
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b001,
            V: bool = false,
            decoded27: u3 = 0b101,
            // `opc0` + `sf` together overlay `Group.opc`.
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.165 LDPSW
        pub const Ldpsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b001,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2 = 0b01,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
            ldpsw: Ldpsw,
        };
        /// `opc` 0b00/0b10 select STP/LDP (the high bit overlays `sf`);
        /// `opc` 0b01 with `L == .load` selects LDPSW; everything else is
        /// unallocated.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                0b00, 0b10 => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                0b01 => switch (inst.group.L) {
                    else => .unallocated,
                    .load => .{ .ldpsw = inst.ldpsw },
                },
                else => .unallocated,
            };
        }
    };

    pub const Vector = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,

        /// Shared layout of the SIMD&FP forms (`V` fixed to `true`).
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b001,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.330 STP (SIMD&FP)
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b001,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.190 LDP (SIMD&FP)
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b001,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
        };
        /// Only the single/double/quad register sizes are allocated.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                .single, .double, .quad => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                _ => .unallocated,
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Splits on the `V` bit: integer view when clear, SIMD&FP view when set.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register pair (offset)
///
/// A `packed union` overlaying the shared `Group` bit layout with the
/// integer and SIMD&FP views of the same 32-bit instruction word.
pub const RegisterPairOffset = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Shared bit layout; `V` selects the integer (`false`) or
    /// SIMD&FP (`true`) view (see `decode`).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        Rt2: Register.Encoded,
        imm7: i7,
        L: L,
        decoded23: u3 = 0b010,
        V: bool,
        decoded27: u3 = 0b101,
        opc: u2,
    };

    pub const Integer = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,
        ldpsw: Ldpsw,

        /// Shared layout of the integer forms (`V` fixed to `false`).
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2,
        };

        /// C6.2.321 STP
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            // `opc0` + `sf` together overlay `Group.opc`.
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.164 LDP
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            // `opc0` + `sf` together overlay `Group.opc`.
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.165 LDPSW
        pub const Ldpsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2 = 0b01,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
            ldpsw: Ldpsw,
        };
        /// `opc` 0b00/0b10 select STP/LDP (the high bit overlays `sf`);
        /// `opc` 0b01 with `L == .load` selects LDPSW; everything else is
        /// unallocated.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                0b00, 0b10 => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                0b01 => switch (inst.group.L) {
                    else => .unallocated,
                    .load => .{ .ldpsw = inst.ldpsw },
                },
                else => .unallocated,
            };
        }
    };

    pub const Vector = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,

        /// Shared layout of the SIMD&FP forms (`V` fixed to `true`).
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b010,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.330 STP (SIMD&FP)
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b010,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.190 LDP (SIMD&FP)
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b010,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
        };
        /// Only the single/double/quad register sizes are allocated.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                .single, .double, .quad => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                _ => .unallocated,
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Splits on the `V` bit: integer view when clear, SIMD&FP view when set.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register pair (pre-indexed)
///
/// A `packed union` overlaying the shared `Group` bit layout with the
/// integer and SIMD&FP views of the same 32-bit instruction word.
pub const RegisterPairPreIndexed = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Shared bit layout; `V` selects the integer (`false`) or
    /// SIMD&FP (`true`) view (see `decode`).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        Rt2: Register.Encoded,
        imm7: i7,
        L: L,
        decoded23: u3 = 0b011,
        V: bool,
        decoded27: u3 = 0b101,
        opc: u2,
    };

    pub const Integer = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,
        ldpsw: Ldpsw,

        /// Shared layout of the integer forms (`V` fixed to `false`).
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2,
        };

        /// C6.2.321 STP
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            // `opc0` + `sf` together overlay `Group.opc`.
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.164 LDP
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            // `opc0` + `sf` together overlay `Group.opc`.
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.165 LDPSW
        pub const Ldpsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            // Fixed 2-bit opcode, named `opc` for consistency with the
            // post-indexed and offset LDPSW encodings (was mis-named
            // `opc0`, a name otherwise used for the 1-bit low half of
            // `opc` in the STP/LDP layouts).
            opc: u2 = 0b01,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
            ldpsw: Ldpsw,
        };
        /// `opc` 0b00/0b10 select STP/LDP (the high bit overlays `sf`);
        /// `opc` 0b01 with `L == .load` selects LDPSW; everything else is
        /// unallocated.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                0b00, 0b10 => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                0b01 => switch (inst.group.L) {
                    else => .unallocated,
                    .load => .{ .ldpsw = inst.ldpsw },
                },
                else => .unallocated,
            };
        }
    };

    pub const Vector = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,

        /// Shared layout of the SIMD&FP forms (`V` fixed to `true`).
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b011,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.330 STP (SIMD&FP)
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b011,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.190 LDP (SIMD&FP)
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b011,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
        };
        /// Only the single/double/quad register sizes are allocated.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                .single, .double, .quad => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                _ => .unallocated,
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Splits on the `V` bit: integer view when clear, SIMD&FP view when set.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register (unscaled immediate)
///
/// A `packed union` overlaying the shared `Group` bit layout with the
/// integer and SIMD&FP views of the same 32-bit instruction word.
pub const RegisterUnscaledImmediate = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Shared bit layout; `V` selects the integer (`false`) or
    /// SIMD&FP (`true`) view (see `decode`).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b00,
        imm9: i9,
        decoded21: u1 = 0b0,
        opc: u2,
        decoded24: u2 = 0b00,
        V: bool,
        decoded27: u3 = 0b111,
        size: u2,
    };

    pub const Integer = packed union {
        group: @This().Group,
        sturb: Sturb,
        ldurb: Ldurb,
        ldursb: Ldursb,
        sturh: Sturh,
        ldurh: Ldurh,
        ldursh: Ldursh,
        stur: Stur,
        ldur: Ldur,
        ldursw: Ldursw,
        prfum: Prfum,

        /// Shared layout of the integer forms (`V` fixed to `false`);
        /// `size`/`opc` together select the concrete instruction.
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize,
        };

        /// C6.2.347 STURB
        pub const Sturb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.203 LDURB
        pub const Ldurb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.205 LDURSB
        pub const Ldursb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            // `opc0` + fixed `opc1` together overlay `Group.opc`
            // (covers opc 0b10 and 0b11).
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.348 STURH
        pub const Sturh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.204 LDURH
        pub const Ldurh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.206 LDURSH
        pub const Ldursh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            // `opc0` + fixed `opc1` together overlay `Group.opc`.
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.346 STUR
        pub const Stur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            // `sf` + fixed `size1` together overlay `Group.size`.
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.202 LDUR
        pub const Ldur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            // `sf` + fixed `size1` together overlay `Group.size`.
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.207 LDURSW
        pub const Ldursw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .word,
        };

        /// C6.2.250 PRFUM
        pub const Prfum = packed struct {
            // Prefetch operation encoding occupies the `Rt` bits.
            prfop: PrfOp,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .doubleword,
        };

        pub const Decoded = union(enum) {
            unallocated,
            sturb: Sturb,
            ldurb: Ldurb,
            ldursb: Ldursb,
            sturh: Sturh,
            ldurh: Ldurh,
            ldursh: Ldursh,
            stur: Stur,
            ldur: Ldur,
            ldursw: Ldursw,
            prfum: Prfum,
        };
        /// Selects the concrete instruction from `size` and `opc`;
        /// any combination with `V` set is unallocated here.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size) {
                .byte => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .sturb = inst.sturb },
                        0b01 => .{ .ldurb = inst.ldurb },
                        0b10, 0b11 => .{ .ldursb = inst.ldursb },
                    },
                    true => .unallocated,
                },
                .halfword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .sturh = inst.sturh },
                        0b01 => .{ .ldurh = inst.ldurh },
                        0b10, 0b11 => .{ .ldursh = inst.ldursh },
                    },
                    true => .unallocated,
                },
                .word => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .stur = inst.stur },
                        0b01 => .{ .ldur = inst.ldur },
                        0b10 => .{ .ldursw = inst.ldursw },
                        0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
                .doubleword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .stur = inst.stur },
                        0b01 => .{ .ldur = inst.ldur },
                        0b10 => .{ .prfum = inst.prfum },
                        0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
            };
        }
    };

    pub const Vector = packed union {
        group: @This().Group,
        stur: Stur,
        ldur: Ldur,

        /// Shared layout of the SIMD&FP forms (`V` fixed to `true`);
        /// the register size is split across `size` and `opc1`.
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.333 STUR (SIMD&FP)
        pub const Stur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .store,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.194 LDUR (SIMD&FP)
        pub const Ldur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .load,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// High opcode bit: set only for quad-register accesses.
        pub const Opc1 = packed struct {
            encoded: u1,

            pub fn encode(vs: Register.VectorSize) Opc1 {
                return .{ .encoded = switch (vs) {
                    .byte, .half, .single, .double => 0b0,
                    .quad => 0b1,
                    else => unreachable,
                } };
            }

            /// Recovers the register size from the `opc1`/`size` pair;
            /// combinations not produced by `encode` are unreachable.
            pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
                return switch (enc_size.encoded) {
                    0b00 => switch (enc_opc1.encoded) {
                        0b0 => .byte,
                        0b1 => .quad,
                    },
                    0b01 => switch (enc_opc1.encoded) {
                        0b0 => .half,
                        0b1 => unreachable,
                    },
                    0b10 => switch (enc_opc1.encoded) {
                        0b0 => .single,
                        0b1 => unreachable,
                    },
                    0b11 => switch (enc_opc1.encoded) {
                        0b0 => .double,
                        0b1 => unreachable,
                    },
                };
            }
        };

        /// Low two size bits; `quad` shares 0b00 with `byte` and is
        /// disambiguated by `Opc1`.
        pub const Size = packed struct {
            encoded: u2,

            pub fn encode(vs: Register.VectorSize) Size {
                return .{ .encoded = switch (vs) {
                    .byte, .quad => 0b00,
                    .half => 0b01,
                    .single => 0b10,
                    .double => 0b11,
                    else => unreachable,
                } };
            }
        };

        pub const Decoded = union(enum) {
            unallocated,
            stur: Stur,
            ldur: Ldur,
        };
        /// `opc1` may only be set when `size` is 0b00 (quad); otherwise
        /// the encoding is unallocated. `opc0` picks store vs load.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size.encoded) {
                0b00 => switch (inst.group.opc0) {
                    .store => .{ .stur = inst.stur },
                    .load => .{ .ldur = inst.ldur },
                },
                0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
                    0b0 => switch (inst.group.opc0) {
                        .store => .{ .stur = inst.stur },
                        .load => .{ .ldur = inst.ldur },
                    },
                    0b1 => .unallocated,
                },
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Splits on the `V` bit: integer view when clear, SIMD&FP view when set.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register (immediate post-indexed)
///
/// A `packed union` overlaying the shared `Group` bit layout with the
/// integer and SIMD&FP views of the same 32-bit instruction word.
pub const RegisterImmediatePostIndexed = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Shared bit layout; `V` selects the integer (`false`) or
    /// SIMD&FP (`true`) view (see `decode`).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b01,
        imm9: i9,
        decoded21: u1 = 0b0,
        opc: u2,
        decoded24: u2 = 0b00,
        V: bool,
        decoded27: u3 = 0b111,
        size: u2,
    };

    pub const Integer = packed union {
        group: @This().Group,
        strb: Strb,
        ldrb: Ldrb,
        ldrsb: Ldrsb,
        strh: Strh,
        ldrh: Ldrh,
        ldrsh: Ldrsh,
        str: Str,
        ldr: Ldr,
        ldrsw: Ldrsw,

        /// Shared layout of the integer forms (`V` fixed to `false`);
        /// `size`/`opc` together select the concrete instruction.
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize,
        };

        /// C6.2.324 STRB (immediate)
        pub const Strb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.170 LDRB (immediate)
        pub const Ldrb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.174 LDRSB (immediate)
        pub const Ldrsb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            // `opc0` + fixed `opc1` together overlay `Group.opc`
            // (covers opc 0b10 and 0b11).
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.326 STRH (immediate)
        pub const Strh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.172 LDRH (immediate)
        pub const Ldrh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.176 LDRSH (immediate)
        pub const Ldrsh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            // `opc0` + fixed `opc1` together overlay `Group.opc`.
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.322 STR (immediate)
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            // `sf` + fixed `size1` together overlay `Group.size`.
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.166 LDR (immediate)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            // `sf` + fixed `size1` together overlay `Group.size`.
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.178 LDRSW (immediate)
        pub const Ldrsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .word,
        };

        pub const Decoded = union(enum) {
            unallocated,
            strb: Strb,
            ldrb: Ldrb,
            ldrsb: Ldrsb,
            strh: Strh,
            ldrh: Ldrh,
            ldrsh: Ldrsh,
            str: Str,
            ldr: Ldr,
            ldrsw: Ldrsw,
        };
        /// Selects the concrete instruction from `size` and `opc`;
        /// any combination with `V` set is unallocated here.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size) {
                .byte => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .strb = inst.strb },
                        0b01 => .{ .ldrb = inst.ldrb },
                        0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
                    },
                    true => .unallocated,
                },
                .halfword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .strh = inst.strh },
                        0b01 => .{ .ldrh = inst.ldrh },
                        0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
                    },
                    true => .unallocated,
                },
                .word => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .str = inst.str },
                        0b01 => .{ .ldr = inst.ldr },
                        0b10 => .{ .ldrsw = inst.ldrsw },
                        0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
                .doubleword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .str = inst.str },
                        0b01 => .{ .ldr = inst.ldr },
                        0b10, 0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
            };
        }
    };

    pub const Vector = packed union {
        group: @This().Group,
        str: Str,
        ldr: Ldr,

        /// Shared layout of the SIMD&FP forms (`V` fixed to `true`);
        /// the register size is split across `size` and `opc1`.
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.331 STR (immediate, SIMD&FP)
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .store,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.191 LDR (immediate, SIMD&FP)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .load,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// High opcode bit: set only for quad-register accesses.
        pub const Opc1 = packed struct {
            encoded: u1,

            pub fn encode(vs: Register.VectorSize) Opc1 {
                return .{ .encoded = switch (vs) {
                    .byte, .half, .single, .double => 0b0,
                    .quad => 0b1,
                    else => unreachable,
                } };
            }

            /// Recovers the register size from the `opc1`/`size` pair;
            /// combinations not produced by `encode` are unreachable.
            pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
                return switch (enc_size.encoded) {
                    0b00 => switch (enc_opc1.encoded) {
                        0b0 => .byte,
                        0b1 => .quad,
                    },
                    0b01 => switch (enc_opc1.encoded) {
                        0b0 => .half,
                        0b1 => unreachable,
                    },
                    0b10 => switch (enc_opc1.encoded) {
                        0b0 => .single,
                        0b1 => unreachable,
                    },
                    0b11 => switch (enc_opc1.encoded) {
                        0b0 => .double,
                        0b1 => unreachable,
                    },
                };
            }
        };

        /// Low two size bits; `quad` shares 0b00 with `byte` and is
        /// disambiguated by `Opc1`.
        pub const Size = packed struct {
            encoded: u2,

            pub fn encode(vs: Register.VectorSize) Size {
                return .{ .encoded = switch (vs) {
                    .byte, .quad => 0b00,
                    .half => 0b01,
                    .single => 0b10,
                    .double => 0b11,
                    else => unreachable,
                } };
            }
        };

        pub const Decoded = union(enum) {
            unallocated,
            str: Str,
            ldr: Ldr,
        };
        /// `opc1` may only be set when `size` is 0b00 (quad); otherwise
        /// the encoding is unallocated. `opc0` picks store vs load.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size.encoded) {
                0b00 => switch (inst.group.opc0) {
                    .store => .{ .str = inst.str },
                    .load => .{ .ldr = inst.ldr },
                },
                0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
                    0b0 => switch (inst.group.opc0) {
                        .store => .{ .str = inst.str },
                        .load => .{ .ldr = inst.ldr },
                    },
                    0b1 => .unallocated,
                },
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Splits on the `V` bit: integer view when clear, SIMD&FP view when set.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register (unprivileged)
///
/// Raw bit layout only; `size`/`opc` select the concrete instruction and
/// the `decoded*` fields carry this subgroup's fixed bits.
pub const RegisterUnprivileged = packed struct {
    Rt: Register.Encoded,
    Rn: Register.Encoded,
    decoded10: u2 = 0b10,
    imm9: i9,
    decoded21: u1 = 0b0,
    opc: u2,
    decoded24: u2 = 0b00,
    V: bool,
    decoded27: u3 = 0b111,
    size: IntegerSize,
};
+
/// Load/store register (immediate pre-indexed)
///
/// A `packed union` overlaying the shared `Group` bit layout with the
/// integer and SIMD&FP views of the same 32-bit instruction word.
pub const RegisterImmediatePreIndexed = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Shared bit layout; `V` selects the integer (`false`) or
    /// SIMD&FP (`true`) view (see `decode`).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b11,
        imm9: i9,
        decoded21: u1 = 0b0,
        opc: u2,
        decoded24: u2 = 0b00,
        V: bool,
        decoded27: u3 = 0b111,
        size: u2,
    };

    pub const Integer = packed union {
        group: @This().Group,
        strb: Strb,
        ldrb: Ldrb,
        ldrsb: Ldrsb,
        strh: Strh,
        ldrh: Ldrh,
        ldrsh: Ldrsh,
        str: Str,
        ldr: Ldr,
        ldrsw: Ldrsw,

        /// Shared layout of the integer forms (`V` fixed to `false`);
        /// `size`/`opc` together select the concrete instruction.
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize,
        };

        /// C6.2.324 STRB (immediate)
        pub const Strb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.170 LDRB (immediate)
        pub const Ldrb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.174 LDRSB (immediate)
        pub const Ldrsb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            // `opc0` + fixed `opc1` together overlay `Group.opc`
            // (covers opc 0b10 and 0b11).
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.326 STRH (immediate)
        pub const Strh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.172 LDRH (immediate)
        pub const Ldrh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.176 LDRSH (immediate)
        pub const Ldrsh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            // `opc0` + fixed `opc1` together overlay `Group.opc`.
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.322 STR (immediate)
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            // `sf` + fixed `size1` together overlay `Group.size`.
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.166 LDR (immediate)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            // `sf` + fixed `size1` together overlay `Group.size`.
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.178 LDRSW (immediate)
        pub const Ldrsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .word,
        };

        pub const Decoded = union(enum) {
            unallocated,
            strb: Strb,
            ldrb: Ldrb,
            ldrsb: Ldrsb,
            strh: Strh,
            ldrh: Ldrh,
            ldrsh: Ldrsh,
            str: Str,
            ldr: Ldr,
            ldrsw: Ldrsw,
        };
        /// Selects the concrete instruction from `size` and `opc`.
        /// NOTE(review): unlike the post-indexed variant, this decode does
        /// not re-check `V` — callers reach it via the outer `decode`'s
        /// `V == false` branch.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size) {
                .byte => switch (inst.group.opc) {
                    0b00 => .{ .strb = inst.strb },
                    0b01 => .{ .ldrb = inst.ldrb },
                    0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
                },
                .halfword => switch (inst.group.opc) {
                    0b00 => .{ .strh = inst.strh },
                    0b01 => .{ .ldrh = inst.ldrh },
                    0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
                },
                .word => switch (inst.group.opc) {
                    0b00 => .{ .str = inst.str },
                    0b01 => .{ .ldr = inst.ldr },
                    0b10 => .{ .ldrsw = inst.ldrsw },
                    0b11 => .unallocated,
                },
                .doubleword => switch (inst.group.opc) {
                    0b00 => .{ .str = inst.str },
                    0b01 => .{ .ldr = inst.ldr },
                    0b10, 0b11 => .unallocated,
                },
            };
        }
    };

    pub const Vector = packed union {
        group: @This().Group,
        str: Str,
        ldr: Ldr,

        /// Shared layout of the SIMD&FP forms (`V` fixed to `true`);
        /// the register size is split across `size` and `opc1`.
        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.331 STR (immediate, SIMD&FP)
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .store,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.191 LDR (immediate, SIMD&FP)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .load,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// High opcode bit: set only for quad-register accesses.
        pub const Opc1 = packed struct {
            encoded: u1,

            pub fn encode(vs: Register.VectorSize) Opc1 {
                return .{ .encoded = switch (vs) {
                    .byte, .half, .single, .double => 0b0,
                    .quad => 0b1,
                    else => unreachable,
                } };
            }

            /// Recovers the register size from the `opc1`/`size` pair;
            /// combinations not produced by `encode` are unreachable.
            pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
                return switch (enc_size.encoded) {
                    0b00 => switch (enc_opc1.encoded) {
                        0b0 => .byte,
                        0b1 => .quad,
                    },
                    0b01 => switch (enc_opc1.encoded) {
                        0b0 => .half,
                        0b1 => unreachable,
                    },
                    0b10 => switch (enc_opc1.encoded) {
                        0b0 => .single,
                        0b1 => unreachable,
                    },
                    0b11 => switch (enc_opc1.encoded) {
                        0b0 => .double,
                        0b1 => unreachable,
                    },
                };
            }
        };

        /// Low two size bits; `quad` shares 0b00 with `byte` and is
        /// disambiguated by `Opc1`.
        pub const Size = packed struct {
            encoded: u2,

            pub fn encode(vs: Register.VectorSize) Size {
                return .{ .encoded = switch (vs) {
                    .byte, .quad => 0b00,
                    .half => 0b01,
                    .single => 0b10,
                    .double => 0b11,
                    else => unreachable,
                } };
            }
        };

        pub const Decoded = union(enum) {
            unallocated,
            str: Str,
            ldr: Ldr,
        };
        /// `opc1` may only be set when `size` is 0b00 (quad); otherwise
        /// the encoding is unallocated. `opc0` picks store vs load.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size.encoded) {
                0b00 => switch (inst.group.opc0) {
                    .store => .{ .str = inst.str },
                    .load => .{ .ldr = inst.ldr },
                },
                0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
                    0b0 => switch (inst.group.opc0) {
                        .store => .{ .str = inst.str },
                        .load => .{ .ldr = inst.ldr },
                    },
                    0b1 => .unallocated,
                },
            };
        }
    };
+
+ pub const Decoded = union(enum) {
+ integer: Integer,
+ vector: Vector,
+ };
+ // The V bit (bit 26) splits the class into integer and SIMD&FP halves.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.V) {
+ false => .{ .integer = inst.integer },
+ true => .{ .vector = inst.vector },
+ };
+ }
+ };
+
+ /// Load/store register (register offset)
+ pub const RegisterRegisterOffset = packed union {
+ group: @This().Group,
+ integer: Integer,
+ vector: Vector,
+
+ /// Selector view shared by the integer and SIMD&FP halves of the class;
+ /// size and opc are raw bits here because their meaning depends on V.
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2,
+ decoded24: u2 = 0b00,
+ V: bool,
+ decoded27: u3 = 0b111,
+ size: u2,
+ };
+
+ /// Integer (non-SIMD) register-offset loads/stores. Every member is a
+ /// 32-bit packed view of the same instruction word with its selector
+ /// bits defaulted; `decode` picks the view matching size/opc.
+ pub const Integer = packed union {
+ group: @This().Group,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize,
+ };
+
+ /// C6.2.325 STRB (register)
+ pub const Strb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.171 LDRB (register)
+ pub const Ldrb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.175 LDRSB (register)
+ /// opc is split: opc1=1 marks the sign-extending form; opc0 selects
+ /// the destination width (assumed — confirm against the Arm ARM).
+ pub const Ldrsb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.327 STRH (register)
+ pub const Strh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.173 LDRH (register)
+ pub const Ldrh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.177 LDRSH (register)
+ pub const Ldrsh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.323 STR (register)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.168 LDR (register)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.180 LDRSW (register)
+ pub const Ldrsw = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .word,
+ };
+
+ /// C6.2.249 PRFM (register)
+ /// The Rt slot carries the prefetch operation instead of a register.
+ pub const Prfm = packed struct {
+ prfop: PrfOp,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .doubleword,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+ };
+ // Mirrors the encoding table: V=1 never selects an integer instruction,
+ // word/opc=0b11 is unallocated, and doubleword/opc=0b10 is PRFM.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.size) {
+ .byte => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strb = inst.strb },
+ 0b01 => .{ .ldrb = inst.ldrb },
+ 0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
+ },
+ true => .unallocated,
+ },
+ .halfword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strh = inst.strh },
+ 0b01 => .{ .ldrh = inst.ldrh },
+ 0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
+ },
+ true => .unallocated,
+ },
+ .word => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .ldrsw = inst.ldrsw },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ .doubleword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .prfm = inst.prfm },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ };
+ }
+ };
+
+ /// SIMD&FP register-offset loads/stores; only STR/LDR exist in this
+ /// class, so there is no per-class Decoded/decode here — the enclosing
+ /// class dispatches on V and the caller inspects opc0 directly.
+ pub const Vector = packed union {
+ group: @This().Group,
+ str: Str,
+ ldr: Ldr,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.332 STR (register, SIMD&FP)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: L = .store,
+ opc1: Opc1,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.193 LDR (register, SIMD&FP)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: L = .load,
+ opc1: Opc1,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// High bit of the SIMD&FP access-size code; together with `Size` it
+ /// distinguishes the 128-bit (quad) form from the byte form.
+ pub const Opc1 = packed struct {
+ encoded: u1,
+
+ pub fn encode(vs: Register.VectorSize) Opc1 {
+ return .{ .encoded = switch (vs) {
+ .byte, .half, .single, .double => 0b0,
+ .quad => 0b1,
+ else => unreachable,
+ } };
+ }
+
+ pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
+ return switch (enc_size.encoded) {
+ 0b00 => switch (enc_opc1.encoded) {
+ 0b0 => .byte,
+ 0b1 => .quad,
+ },
+ 0b01 => switch (enc_opc1.encoded) {
+ 0b0 => .half,
+ 0b1 => unreachable,
+ },
+ 0b10 => switch (enc_opc1.encoded) {
+ 0b0 => .single,
+ 0b1 => unreachable,
+ },
+ 0b11 => switch (enc_opc1.encoded) {
+ 0b0 => .double,
+ 0b1 => unreachable,
+ },
+ };
+ }
+ };
+
+ /// Low two bits of the SIMD&FP access-size code.
+ pub const Size = packed struct {
+ encoded: u2,
+
+ pub fn encode(vs: Register.VectorSize) Size {
+ return .{ .encoded = switch (vs) {
+ .byte, .quad => 0b00,
+ .half => 0b01,
+ .single => 0b10,
+ .double => 0b11,
+ else => unreachable,
+ } };
+ }
+ };
+ };
+
+ /// Extend/shift option field of register-offset addressing (bits 13-15).
+ /// Only the four named encodings are valid for loads/stores; unnamed
+ /// patterns are representable (non-exhaustive enum) but must not reach
+ /// `sf`.
+ pub const Option = enum(u3) {
+ uxtw = 0b010,
+ lsl = 0b011,
+ sxtw = 0b110,
+ sxtx = 0b111,
+ _,
+
+ /// Width of the index register Rm implied by the extend option:
+ /// the word-extending options take a 32-bit Rm, the rest a 64-bit Rm.
+ pub fn sf(option: Option) Register.IntegerSize {
+ return switch (option) {
+ .uxtw, .sxtw => .word,
+ .lsl, .sxtx => .doubleword,
+ _ => unreachable,
+ };
+ }
+ };
+
+ /// Extend operation tagged by `Option`; the payload is the shift amount
+ /// applied to the extended Rm.
+ pub const Extend = union(Option) {
+ uxtw: Amount,
+ lsl: Amount,
+ sxtw: Amount,
+ sxtx: Amount,
+
+ pub const Amount = u3;
+ };
+
+ pub const Decoded = union(enum) {
+ integer: Integer,
+ vector: Vector,
+ };
+ // The V bit (bit 26) splits the class into integer and SIMD&FP halves.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.V) {
+ false => .{ .integer = inst.integer },
+ true => .{ .vector = inst.vector },
+ };
+ }
+ };
+
+ /// Load/store register (unsigned immediate)
+ pub const RegisterUnsignedImmediate = packed union {
+ group: @This().Group,
+ integer: Integer,
+ vector: Vector,
+
+ /// Selector view shared by the integer and SIMD&FP halves; the unsigned
+ /// imm12 replaces the imm9/writeback fields of the other classes.
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2,
+ decoded24: u2 = 0b01,
+ V: bool,
+ decoded27: u3 = 0b111,
+ size: u2,
+ };
+
+ /// Integer (non-SIMD) unsigned-immediate loads/stores. Every member is
+ /// a 32-bit packed view of the same instruction word with its selector
+ /// bits defaulted; `decode` picks the view matching size/opc.
+ pub const Integer = packed union {
+ group: @This().Group,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize,
+ };
+
+ /// C6.2.324 STRB (immediate)
+ pub const Strb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.170 LDRB (immediate)
+ pub const Ldrb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.174 LDRSB (immediate)
+ /// opc is split: opc1=1 marks the sign-extending form; opc0 selects
+ /// the destination width (assumed — confirm against the Arm ARM).
+ pub const Ldrsb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.326 STRH (immediate)
+ pub const Strh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.172 LDRH (immediate)
+ pub const Ldrh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.176 LDRSH (immediate)
+ pub const Ldrsh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.322 STR (immediate)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.166 LDR (immediate)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.178 LDRSW (immediate)
+ pub const Ldrsw = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .word,
+ };
+
+ /// C6.2.247 PRFM (immediate)
+ /// The Rt slot carries the prefetch operation instead of a register.
+ pub const Prfm = packed struct {
+ prfop: PrfOp,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .doubleword,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+ };
+ // Mirrors the encoding table: V=1 never selects an integer instruction,
+ // word/opc=0b11 is unallocated, and doubleword/opc=0b10 is PRFM.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.size) {
+ .byte => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strb = inst.strb },
+ 0b01 => .{ .ldrb = inst.ldrb },
+ 0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
+ },
+ true => .unallocated,
+ },
+ .halfword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strh = inst.strh },
+ 0b01 => .{ .ldrh = inst.ldrh },
+ 0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
+ },
+ true => .unallocated,
+ },
+ .word => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .ldrsw = inst.ldrsw },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ .doubleword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .prfm = inst.prfm },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ };
+ }
+ };
+
+ /// SIMD&FP unsigned-immediate loads/stores (V=1); only STR/LDR exist.
+ pub const Vector = packed union {
+ group: @This().Group,
+ str: Str,
+ ldr: Ldr,
+
+ /// Selector view: size, opc1, and opc0 (the L bit) are inspected
+ /// before committing to a specific Str/Ldr interpretation.
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: L,
+ opc1: Opc1,
+ decoded24: u2 = 0b01,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.331 STR (immediate, SIMD&FP)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: L = .store,
+ opc1: Opc1,
+ decoded24: u2 = 0b01,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.191 LDR (immediate, SIMD&FP)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: L = .load,
+ opc1: Opc1,
+ decoded24: u2 = 0b01,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// High bit of the SIMD&FP access-size code; together with `Size` it
+ /// distinguishes the 128-bit (quad) form from the byte form.
+ pub const Opc1 = packed struct {
+ encoded: u1,
+
+ pub fn encode(vs: Register.VectorSize) Opc1 {
+ return .{ .encoded = switch (vs) {
+ .byte, .half, .single, .double => 0b0,
+ .quad => 0b1,
+ else => unreachable,
+ } };
+ }
+
+ pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
+ return switch (enc_size.encoded) {
+ 0b00 => switch (enc_opc1.encoded) {
+ 0b0 => .byte,
+ 0b1 => .quad,
+ },
+ 0b01 => switch (enc_opc1.encoded) {
+ 0b0 => .half,
+ 0b1 => unreachable,
+ },
+ 0b10 => switch (enc_opc1.encoded) {
+ 0b0 => .single,
+ 0b1 => unreachable,
+ },
+ 0b11 => switch (enc_opc1.encoded) {
+ 0b0 => .double,
+ 0b1 => unreachable,
+ },
+ };
+ }
+ };
+
+ /// Low two bits of the SIMD&FP access-size code.
+ pub const Size = packed struct {
+ encoded: u2,
+
+ pub fn encode(vs: Register.VectorSize) Size {
+ return .{ .encoded = switch (vs) {
+ .byte, .quad => 0b00,
+ .half => 0b01,
+ .single => 0b10,
+ .double => 0b11,
+ else => unreachable,
+ } };
+ }
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ str: Str,
+ ldr: Ldr,
+ };
+ // A quad (128-bit) access is encoded as size=0b00 with opc1=1; for all
+ // other sizes opc1 must be 0, otherwise the pattern is unallocated.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.size.encoded) {
+ 0b00 => switch (inst.group.opc0) {
+ .store => .{ .str = inst.str },
+ .load => .{ .ldr = inst.ldr },
+ },
+ 0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
+ 0b0 => switch (inst.group.opc0) {
+ .store => .{ .str = inst.str },
+ .load => .{ .ldr = inst.ldr },
+ },
+ 0b1 => .unallocated,
+ },
+ };
+ }
+ };
+
+ pub const Decoded = union(enum) {
+ integer: Integer,
+ vector: Vector,
+ };
+ // The V bit (bit 26) splits the class into integer and SIMD&FP halves.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.V) {
+ false => .{ .integer = inst.integer },
+ true => .{ .vector = inst.vector },
+ };
+ }
+ };
+
+ /// Load/store direction bit, used as the opc0 selector of the SIMD&FP
+ /// classes above.
+ pub const L = enum(u1) {
+ store = 0b0,
+ load = 0b1,
+ };
+
+ /// Integer access size — the 2-bit `size` selector (bits 30-31) of the
+ /// load/store classes above.
+ pub const IntegerSize = enum(u2) {
+ byte = 0b00,
+ halfword = 0b01,
+ word = 0b10,
+ doubleword = 0b11,
+ };
+
+ /// 2-bit vector size code covering 32-, 64-, and 128-bit registers; the
+ /// fourth encoding is reserved (non-exhaustive enum).
+ pub const VectorSize = enum(u2) {
+ single = 0b00,
+ double = 0b01,
+ quad = 0b10,
+ _,
+
+ /// Widen to the register-level size enum; reserved encodings must not
+ /// reach this function.
+ pub fn decode(vs: VectorSize) Register.VectorSize {
+ switch (vs) {
+ .single => return .single,
+ .double => return .double,
+ .quad => return .quad,
+ _ => unreachable,
+ }
+ }
+
+ /// Narrow from the register-level size enum; callers must only pass
+ /// one of the three representable sizes.
+ pub fn encode(vs: Register.VectorSize) VectorSize {
+ switch (vs) {
+ .single => return .single,
+ .double => return .double,
+ .quad => return .quad,
+ else => unreachable,
+ }
+ }
+ };
+
+ /// PRFM prefetch operation, encoded in the Rt field slot of the PRFM
+ /// instructions (C6.2.247 / C6.2.249). Packed LSB-first: policy (bit 0),
+ /// target (bits 1-2), type (bits 3-4).
+ pub const PrfOp = packed struct {
+ policy: Policy,
+ target: Target,
+ type: Type,
+
+ /// Retention policy: temporal (keep) or streaming/non-temporal (strm).
+ pub const Policy = enum(u1) {
+ keep = 0b0,
+ strm = 0b1,
+ };
+
+ /// Cache level the prefetch targets; 0b11 is reserved.
+ pub const Target = enum(u2) {
+ l1 = 0b00,
+ l2 = 0b01,
+ l3 = 0b10,
+ _,
+ };
+
+ /// Prefetch kind: load (pld), instruction (pli), or store (pst);
+ /// 0b11 is reserved.
+ pub const Type = enum(u2) {
+ pld = 0b00,
+ pli = 0b01,
+ pst = 0b10,
+ _,
+ };
+
+ // Named <type><level><policy> operations from the Arm ARM.
+ pub const pldl1keep: PrfOp = .{ .type = .pld, .target = .l1, .policy = .keep };
+ pub const pldl1strm: PrfOp = .{ .type = .pld, .target = .l1, .policy = .strm };
+ pub const pldl2keep: PrfOp = .{ .type = .pld, .target = .l2, .policy = .keep };
+ pub const pldl2strm: PrfOp = .{ .type = .pld, .target = .l2, .policy = .strm };
+ pub const pldl3keep: PrfOp = .{ .type = .pld, .target = .l3, .policy = .keep };
+ pub const pldl3strm: PrfOp = .{ .type = .pld, .target = .l3, .policy = .strm };
+ pub const plil1keep: PrfOp = .{ .type = .pli, .target = .l1, .policy = .keep };
+ pub const plil1strm: PrfOp = .{ .type = .pli, .target = .l1, .policy = .strm };
+ pub const plil2keep: PrfOp = .{ .type = .pli, .target = .l2, .policy = .keep };
+ pub const plil2strm: PrfOp = .{ .type = .pli, .target = .l2, .policy = .strm };
+ pub const plil3keep: PrfOp = .{ .type = .pli, .target = .l3, .policy = .keep };
+ pub const plil3strm: PrfOp = .{ .type = .pli, .target = .l3, .policy = .strm };
+ pub const pstl1keep: PrfOp = .{ .type = .pst, .target = .l1, .policy = .keep };
+ pub const pstl1strm: PrfOp = .{ .type = .pst, .target = .l1, .policy = .strm };
+ pub const pstl2keep: PrfOp = .{ .type = .pst, .target = .l2, .policy = .keep };
+ pub const pstl2strm: PrfOp = .{ .type = .pst, .target = .l2, .policy = .strm };
+ pub const pstl3keep: PrfOp = .{ .type = .pst, .target = .l3, .policy = .keep };
+ // Fix: was `.policy = .strm_`, which names no Policy member and cannot
+ // compile; PSTL3STRM uses the streaming policy like its siblings.
+ pub const pstl3strm: PrfOp = .{ .type = .pst, .target = .l3, .policy = .strm };
+ };
+
+ /// Every load/store sub-class this group can decode into.
+ pub const Decoded = union(enum) {
+ unallocated,
+ register_literal: RegisterLiteral,
+ memory: Memory,
+ no_allocate_pair_offset: NoAllocatePairOffset,
+ register_pair_post_indexed: RegisterPairPostIndexed,
+ register_pair_offset: RegisterPairOffset,
+ register_pair_pre_indexed: RegisterPairPreIndexed,
+ register_unscaled_immediate: RegisterUnscaledImmediate,
+ register_immediate_post_indexed: RegisterImmediatePostIndexed,
+ register_unprivileged: RegisterUnprivileged,
+ register_immediate_pre_indexed: RegisterImmediatePreIndexed,
+ register_register_offset: RegisterRegisterOffset,
+ register_unsigned_immediate: RegisterUnsignedImmediate,
+ };
+ // Routes on op0/op2/op3/op4 per the load/store group encoding table.
+ // NOTE(review): .register_literal and .memory are declared in Decoded but
+ // never produced below — every op0 pattern outside the two listed groups
+ // currently maps to .unallocated. Confirm whether those classes are
+ // decoded elsewhere or are still TODO.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op0) {
+ else => .unallocated,
+ 0b0010, 0b0110, 0b1010, 0b1110 => switch (inst.group.op2) {
+ 0b00 => .{ .no_allocate_pair_offset = inst.no_allocate_pair_offset },
+ 0b01 => .{ .register_pair_post_indexed = inst.register_pair_post_indexed },
+ 0b10 => .{ .register_pair_offset = inst.register_pair_offset },
+ 0b11 => .{ .register_pair_pre_indexed = inst.register_pair_pre_indexed },
+ },
+ 0b0011, 0b0111, 0b1011, 0b1111 => switch (inst.group.op2) {
+ 0b00...0b01 => switch (inst.group.op3) {
+ 0b000000...0b011111 => switch (inst.group.op4) {
+ 0b00 => .{ .register_unscaled_immediate = inst.register_unscaled_immediate },
+ 0b01 => .{ .register_immediate_post_indexed = inst.register_immediate_post_indexed },
+ 0b10 => .{ .register_unprivileged = inst.register_unprivileged },
+ 0b11 => .{ .register_immediate_pre_indexed = inst.register_immediate_pre_indexed },
+ },
+ 0b100000...0b111111 => switch (inst.group.op4) {
+ 0b00 => .unallocated,
+ 0b10 => .{ .register_register_offset = inst.register_register_offset },
+ 0b01, 0b11 => .unallocated,
+ },
+ },
+ 0b10...0b11 => .{ .register_unsigned_immediate = inst.register_unsigned_immediate },
+ },
+ };
+ }
+ };
+
+ /// C4.1.89 Data Processing -- Register
+ pub const DataProcessingRegister = packed union {
+ group: @This().Group,
+ data_processing_two_source: DataProcessingTwoSource,
+ data_processing_one_source: DataProcessingOneSource,
+ logical_shifted_register: LogicalShiftedRegister,
+ add_subtract_shifted_register: AddSubtractShiftedRegister,
+ add_subtract_extended_register: AddSubtractExtendedRegister,
+ add_subtract_with_carry: AddSubtractWithCarry,
+ rotate_right_into_flags: RotateRightIntoFlags,
+ evaluate_into_flags: EvaluateIntoFlags,
+ conditional_compare_register: ConditionalCompareRegister,
+ conditional_compare_immediate: ConditionalCompareImmediate,
+ conditional_select: ConditionalSelect,
+ data_processing_three_source: DataProcessingThreeSource,
+
+ /// Table C4-90 Encoding table for the Data Processing -- Register group
+ /// Raw selector view: op0/op1/op2/op3 route to a sub-class; the
+ /// `encoded*` spans are interpreted only after the sub-class is chosen.
+ pub const Group = packed struct {
+ encoded0: u10,
+ op3: u6,
+ encoded16: u5,
+ op2: u4,
+ decoded25: u3 = 0b101,
+ op1: u1,
+ encoded29: u1,
+ op0: u1,
+ encoded31: u1,
+ };
+
+ /// Data-processing (2 source)
+ /// Divides and variable shifts; each member view defaults the opcode
+ /// bits that select it.
+ pub const DataProcessingTwoSource = packed union {
+ group: @This().Group,
+ udiv: Udiv,
+ sdiv: Sdiv,
+ lslv: Lslv,
+ lsrv: Lsrv,
+ asrv: Asrv,
+ rorv: Rorv,
+
+ /// Selector view; opcode (bits 10-15) picks the instruction once S=0.
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opcode: u6,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.388 UDIV
+ pub const Udiv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ o1: DivOp = .udiv,
+ decoded11: u5 = 0b00001,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.270 SDIV
+ pub const Sdiv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ o1: DivOp = .sdiv,
+ decoded11: u5 = 0b00001,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.214 LSLV
+ pub const Lslv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .lslv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.217 LSRV
+ pub const Lsrv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .lsrv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.18 ASRV
+ pub const Asrv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .asrv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.263 RORV
+ pub const Rorv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .rorv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// Low opcode bit distinguishing unsigned from signed divide.
+ pub const DivOp = enum(u1) {
+ udiv = 0b0,
+ sdiv = 0b1,
+ };
+
+ /// Low two opcode bits selecting the variable-shift flavor.
+ pub const ShiftOp = enum(u2) {
+ lslv = 0b00,
+ lsrv = 0b01,
+ asrv = 0b10,
+ rorv = 0b11,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ udiv: Udiv,
+ sdiv: Sdiv,
+ lslv: Lslv,
+ lsrv: Lsrv,
+ asrv: Asrv,
+ rorv: Rorv,
+ };
+ // Only the S=0 half of the table is allocated here; opcode selects
+ // between the divide (0b00001x) and shift (0b0010xx) groups.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.S) {
+ false => switch (inst.group.opcode) {
+ else => .unallocated,
+ 0b000010 => .{ .udiv = inst.udiv },
+ 0b000011 => .{ .sdiv = inst.sdiv },
+ 0b001000 => .{ .lslv = inst.lslv },
+ 0b001001 => .{ .lsrv = inst.lsrv },
+ 0b001010 => .{ .asrv = inst.asrv },
+ 0b001011 => .{ .rorv = inst.rorv },
+ },
+ true => .unallocated,
+ };
+ }
+ };
+
+ /// Data-processing (1 source)
+ /// Bit/byte reversal and leading bit counts.
+ pub const DataProcessingOneSource = packed union {
+ group: @This().Group,
+ rbit: Rbit,
+ rev16: Rev16,
+ rev32: Rev32,
+ rev: Rev,
+ clz: Clz,
+ cls: Cls,
+
+ /// Selector view; opcode2 must be all-zero for this class, then
+ /// opcode (bits 10-15) picks the instruction.
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opcode: u6,
+ opcode2: u5,
+ decoded21: u8 = 0b11010110,
+ S: bool,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.253 RBIT
+ pub const Rbit = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b00,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.257 REV16
+ pub const Rev16 = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opc: u2 = 0b01,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.258 REV32
+ /// Only exists in the 64-bit form, hence the defaulted sf.
+ pub const Rev32 = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opc: u2 = 0b10,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// C6.2.256 REV
+ /// The low opc bit tracks sf, so it is typed as IntegerSize here.
+ pub const Rev = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opc0: Register.IntegerSize,
+ opc1: u1 = 0b1,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.58 CLZ
+ pub const Clz = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op: u1 = 0b0,
+ decoded11: u5 = 0b00010,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.57 CLS
+ pub const Cls = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op: u1 = 0b1,
+ decoded11: u5 = 0b00010,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ rbit: Rbit,
+ rev16: Rev16,
+ rev32: Rev32,
+ rev: Rev,
+ clz: Clz,
+ cls: Cls,
+ };
+ // S must be 0 and opcode2 zero; opcodes 0b000010/0b000011 are shared
+ // between REV32 and REV depending on sf (REV32 is 64-bit only).
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.S) {
+ true => .unallocated,
+ false => switch (inst.group.opcode2) {
+ else => .unallocated,
+ 0b00000 => switch (inst.group.opcode) {
+ else => .unallocated,
+ 0b000000 => .{ .rbit = inst.rbit },
+ 0b000001 => .{ .rev16 = inst.rev16 },
+ 0b000010 => switch (inst.group.sf) {
+ .word => .{ .rev = inst.rev },
+ .doubleword => .{ .rev32 = inst.rev32 },
+ },
+ 0b000011 => switch (inst.group.sf) {
+ .word => .unallocated,
+ .doubleword => .{ .rev = inst.rev },
+ },
+ 0b000100 => .{ .clz = inst.clz },
+ 0b000101 => .{ .cls = inst.cls },
+ },
+ },
+ };
+ }
+ };
+
+ /// Logical (shifted register)
+ /// AND/ORR/EOR/ANDS plus their register-inverting (N=1) variants.
+ pub const LogicalShiftedRegister = packed union {
+ group: @This().Group,
+ @"and": And,
+ bic: Bic,
+ orr: Orr,
+ orn: Orn,
+ eor: Eor,
+ eon: Eon,
+ ands: Ands,
+ bics: Bics,
+
+ /// Selector view; opc picks the logical operation and N its inverting
+ /// variant.
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.13 AND (shifted register)
+ pub const And = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .@"and",
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.32 BIC (shifted register)
+ pub const Bic = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .@"and",
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.241 ORR (shifted register)
+ pub const Orr = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .orr,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.239 ORN (shifted register)
+ pub const Orn = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .orr,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.120 EOR (shifted register)
+ pub const Eor = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .eor,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.118 EON (shifted register)
+ pub const Eon = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .eor,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.15 ANDS (shifted register)
+ pub const Ands = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .ands,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.33 BICS (shifted register)
+ pub const Bics = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .ands,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ @"and": And,
+ bic: Bic,
+ orr: Orr,
+ orn: Orn,
+ eor: Eor,
+ eon: Eon,
+ ands: Ands,
+ bics: Bics,
+ };
+ // imm6<5> must be 0 for 32-bit (sf=word) operands; otherwise opc picks
+ // the logical operation and N selects its register-inverting variant.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return if (inst.group.sf == .word and @as(u1, @truncate(inst.group.imm6 >> 5)) == 0b1)
+ .unallocated
+ else switch (inst.group.opc) {
+ .@"and" => switch (inst.group.N) {
+ false => .{ .@"and" = inst.@"and" },
+ true => .{ .bic = inst.bic },
+ },
+ .orr => switch (inst.group.N) {
+ false => .{ .orr = inst.orr },
+ true => .{ .orn = inst.orn },
+ },
+ .eor => switch (inst.group.N) {
+ false => .{ .eor = inst.eor },
+ true => .{ .eon = inst.eon },
+ },
+ .ands => switch (inst.group.N) {
+ false => .{ .ands = inst.ands },
+ true => .{ .bics = inst.bics },
+ },
+ };
+ }
+ };
+
+ /// Add/subtract (shifted register)
+ /// All four forms share one 32-bit layout; `op` selects add vs sub and `S`
+ /// selects the flag-setting variant. `shift` = .ror is unallocated (see decode).
+ pub const AddSubtractShiftedRegister = packed union {
+ group: @This().Group,
+ add: Add,
+ adds: Adds,
+ sub: Sub,
+ subs: Subs,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool,
+ op: AddSubtractOp,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.5 ADD (shifted register)
+ pub const Add = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = false,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.9 ADDS (shifted register)
+ pub const Adds = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = true,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.358 SUB (shifted register)
+ /// (Original comment said C6.2.5, which is ADD; the shifted-register SUB
+ /// follows the extended form C6.2.356 as with ADD C6.2.3/C6.2.5.)
+ pub const Sub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = false,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.364 SUBS (shifted register)
+ /// (Original comment said C6.2.9, which is ADDS.)
+ pub const Subs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = true,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ /// One concrete add/subtract (shifted register) form, or `.unallocated`.
+ pub const Decoded = union(enum) {
+ unallocated,
+ add: Add,
+ adds: Adds,
+ sub: Sub,
+ subs: Subs,
+ };
+ /// ROR shifts and 32-bit forms with imm6 bit 5 set are reserved; otherwise
+ /// dispatch on op (add/sub) and S (flag-setting).
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.shift) {
+ .ror => .unallocated,
+ .lsl, .lsr, .asr => if (inst.group.sf == .word and @as(u1, @truncate(inst.group.imm6 >> 5)) == 0b1)
+ .unallocated
+ else switch (inst.group.op) {
+ .add => switch (inst.group.S) {
+ false => .{ .add = inst.add },
+ true => .{ .adds = inst.adds },
+ },
+ .sub => switch (inst.group.S) {
+ false => .{ .sub = inst.sub },
+ true => .{ .subs = inst.subs },
+ },
+ },
+ };
+ }
+ };
+
+ /// Add/subtract (extended register)
+ /// The Rm operand is sign/zero-extended per `option` and then shifted left
+ /// by imm3 (0-4). imm3 > 4 and opt != 0b00 are reserved (see decode).
+ pub const AddSubtractExtendedRegister = packed union {
+ group: @This().Group,
+ add: Add,
+ adds: Adds,
+ sub: Sub,
+ subs: Subs,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ /// Left-shift amount applied after extension; only 0-4 are allocated.
+ imm3: Extend.Amount,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opt: u2,
+ decoded24: u5 = 0b01011,
+ S: bool,
+ op: AddSubtractOp,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.3 ADD (extended register)
+ pub const Add = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm3: Extend.Amount,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opt: u2 = 0b00,
+ decoded24: u5 = 0b01011,
+ S: bool = false,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.7 ADDS (extended register)
+ pub const Adds = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm3: Extend.Amount,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opt: u2 = 0b00,
+ decoded24: u5 = 0b01011,
+ S: bool = true,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.356 SUB (extended register)
+ pub const Sub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm3: Extend.Amount,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opt: u2 = 0b00,
+ decoded24: u5 = 0b01011,
+ S: bool = false,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.362 SUBS (extended register)
+ pub const Subs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm3: Extend.Amount,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opt: u2 = 0b00,
+ decoded24: u5 = 0b01011,
+ S: bool = true,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ /// Extension kind applied to the Rm operand (3-bit `option` field).
+ pub const Option = enum(u3) {
+ uxtb = 0b000,
+ uxth = 0b001,
+ uxtw = 0b010,
+ uxtx = 0b011,
+ sxtb = 0b100,
+ sxth = 0b101,
+ sxtw = 0b110,
+ sxtx = 0b111,
+
+ /// Source register size implied by the extension: sub-doubleword
+ /// extensions read a W register, the X-to-X extensions read an X register.
+ pub fn sf(option: Option) Register.IntegerSize {
+ return switch (option) {
+ .uxtb, .uxth, .uxtw, .sxtb, .sxth, .sxtw => .word,
+ .uxtx, .sxtx => .doubleword,
+ };
+ }
+ };
+
+ /// Extension kind paired with its post-extension left-shift amount.
+ pub const Extend = union(Option) {
+ uxtb: Amount,
+ uxth: Amount,
+ uxtw: Amount,
+ uxtx: Amount,
+ sxtb: Amount,
+ sxth: Amount,
+ sxtw: Amount,
+ sxtx: Amount,
+
+ pub const Amount = u3;
+ };
+
+ /// One concrete add/subtract (extended register) form, or `.unallocated`.
+ pub const Decoded = union(enum) {
+ unallocated,
+ add: Add,
+ adds: Adds,
+ sub: Sub,
+ subs: Subs,
+ };
+ /// Shift amounts above 4 and nonzero `opt` are reserved; otherwise dispatch
+ /// on op (add/sub) and S (flag-setting).
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.imm3) {
+ 0b101 => .unallocated,
+ 0b110...0b111 => .unallocated,
+ 0b000...0b100 => switch (inst.group.opt) {
+ 0b01 => .unallocated,
+ 0b10...0b11 => .unallocated,
+ 0b00 => switch (inst.group.op) {
+ .add => switch (inst.group.S) {
+ false => .{ .add = inst.add },
+ true => .{ .adds = inst.adds },
+ },
+ .sub => switch (inst.group.S) {
+ false => .{ .sub = inst.sub },
+ true => .{ .subs = inst.subs },
+ },
+ },
+ },
+ };
+ }
+ };
+
+ /// Add/subtract (with carry)
+ /// ADC/ADCS/SBC/SBCS; every bit pattern in this group is allocated, so
+ /// decode has no `.unallocated` arm.
+ pub const AddSubtractWithCarry = packed union {
+ group: @This().Group,
+ adc: Adc,
+ adcs: Adcs,
+ sbc: Sbc,
+ sbcs: Sbcs,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010000,
+ S: bool,
+ op: Op,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.1 ADC
+ pub const Adc = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010000,
+ S: bool = false,
+ op: Op = .adc,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.2 ADCS
+ pub const Adcs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010000,
+ S: bool = true,
+ op: Op = .adc,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.265 SBC
+ pub const Sbc = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010000,
+ S: bool = false,
+ op: Op = .sbc,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.266 SBCS
+ pub const Sbcs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010000,
+ S: bool = true,
+ op: Op = .sbc,
+ sf: Register.IntegerSize,
+ };
+
+ /// Bit 30: add-with-carry vs subtract-with-carry.
+ pub const Op = enum(u1) {
+ adc = 0b0,
+ sbc = 0b1,
+ };
+
+ pub const Decoded = union(enum) {
+ adc: Adc,
+ adcs: Adcs,
+ sbc: Sbc,
+ sbcs: Sbcs,
+ };
+ /// Dispatch on op (adc/sbc) and S (flag-setting); total function.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op) {
+ .adc => switch (inst.group.S) {
+ false => .{ .adc = inst.adc },
+ true => .{ .adcs = inst.adcs },
+ },
+ .sbc => switch (inst.group.S) {
+ false => .{ .sbc = inst.sbc },
+ true => .{ .sbcs = inst.sbcs },
+ },
+ };
+ }
+ };
+
+ /// Rotate right into flags (the RMIF instruction).
+ pub const RotateRightIntoFlags = packed union {
+ group: @This().Group,
+
+ pub const Group = packed struct {
+ mask: Nzcv,
+ o2: u1,
+ Rn: Register.Encoded,
+ // Fixed bits 14:10. Written with all five digits so the literal width
+ // matches the field width, as in every other `decodedN` field here
+ // (value unchanged: 0b00001 == 0b0001).
+ decoded10: u5 = 0b00001,
+ imm6: u6,
+ decoded21: u8 = 0b11010000,
+ S: bool,
+ op: u1,
+ sf: Register.IntegerSize,
+ };
+ };
+
+ /// Evaluate into flags
+ /// NOTE(review): per the Arm ARM this group holds SETF8/SETF16, selected by
+ /// `sz` — verify against the manual revision this file targets.
+ pub const EvaluateIntoFlags = packed union {
+ group: @This().Group,
+
+ pub const Group = packed struct {
+ mask: Nzcv,
+ o3: u1,
+ Rn: Register.Encoded,
+ decoded10: u4 = 0b0010,
+ /// Operand width the flags are evaluated from.
+ sz: enum(u1) {
+ byte = 0b0,
+ word = 0b1,
+ },
+ opcode2: u6,
+ decoded21: u8 = 0b11010000,
+ S: bool,
+ op: u1,
+ sf: Register.IntegerSize,
+ };
+ };
+
+ /// Conditional compare (register)
+ /// CCMN/CCMP: if `cond` holds, compare Rn with Rm; otherwise set NZCV to `nzcv`.
+ pub const ConditionalCompareRegister = packed union {
+ group: @This().Group,
+ ccmn: Ccmn,
+ ccmp: Ccmp,
+
+ pub const Group = packed struct {
+ /// Flags value used when `cond` does not hold.
+ nzcv: Nzcv,
+ o3: u1,
+ Rn: Register.Encoded,
+ o2: u1,
+ decoded11: u1 = 0b0,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010010,
+ S: bool,
+ op: Op,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.49 CCMN (register)
+ pub const Ccmn = packed struct {
+ nzcv: Nzcv,
+ o3: u1 = 0b0,
+ Rn: Register.Encoded,
+ o2: u1 = 0b0,
+ decoded11: u1 = 0b0,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010010,
+ S: bool = true,
+ op: Op = .ccmn,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.51 CCMP (register)
+ pub const Ccmp = packed struct {
+ nzcv: Nzcv,
+ o3: u1 = 0b0,
+ Rn: Register.Encoded,
+ o2: u1 = 0b0,
+ decoded11: u1 = 0b0,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010010,
+ S: bool = true,
+ op: Op = .ccmp,
+ sf: Register.IntegerSize,
+ };
+
+ /// Bit 30: negative (CCMN) vs positive (CCMP) conditional compare.
+ pub const Op = enum(u1) {
+ ccmn = 0b0,
+ ccmp = 0b1,
+ };
+ };
+
+ /// Conditional compare (immediate)
+ /// Same layout as the register form, but bit 11 is 1 and Rm is replaced by
+ /// the 5-bit unsigned immediate `imm5`.
+ pub const ConditionalCompareImmediate = packed union {
+ group: @This().Group,
+ ccmn: Ccmn,
+ ccmp: Ccmp,
+
+ pub const Group = packed struct {
+ /// Flags value used when `cond` does not hold.
+ nzcv: Nzcv,
+ o3: u1,
+ Rn: Register.Encoded,
+ o2: u1,
+ decoded11: u1 = 0b1,
+ cond: ConditionCode,
+ imm5: u5,
+ decoded21: u8 = 0b11010010,
+ S: bool,
+ op: Op,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.48 CCMN (immediate)
+ pub const Ccmn = packed struct {
+ nzcv: Nzcv,
+ o3: u1 = 0b0,
+ Rn: Register.Encoded,
+ o2: u1 = 0b0,
+ decoded11: u1 = 0b1,
+ cond: ConditionCode,
+ imm5: u5,
+ decoded21: u8 = 0b11010010,
+ S: bool = true,
+ op: Op = .ccmn,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.50 CCMP (immediate)
+ pub const Ccmp = packed struct {
+ nzcv: Nzcv,
+ o3: u1 = 0b0,
+ Rn: Register.Encoded,
+ o2: u1 = 0b0,
+ decoded11: u1 = 0b1,
+ cond: ConditionCode,
+ imm5: u5,
+ decoded21: u8 = 0b11010010,
+ S: bool = true,
+ op: Op = .ccmp,
+ sf: Register.IntegerSize,
+ };
+
+ /// Bit 30: negative (CCMN) vs positive (CCMP) conditional compare.
+ pub const Op = enum(u1) {
+ ccmn = 0b0,
+ ccmp = 0b1,
+ };
+ };
+
+ /// Conditional select
+ /// CSEL/CSINC/CSINV/CSNEG, distinguished by the `op` bit and `op2`;
+ /// S = 1 and op2 >= 0b10 are reserved (see decode).
+ pub const ConditionalSelect = packed union {
+ group: @This().Group,
+ csel: Csel,
+ csinc: Csinc,
+ csinv: Csinv,
+ csneg: Csneg,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: u2,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010100,
+ S: bool,
+ op: u1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.103 CSEL
+ pub const Csel = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: u2 = 0b00,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010100,
+ S: bool = false,
+ op: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.106 CSINC
+ pub const Csinc = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: u2 = 0b01,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010100,
+ S: bool = false,
+ op: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.107 CSINV
+ pub const Csinv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: u2 = 0b00,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010100,
+ S: bool = false,
+ op: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.108 CSNEG
+ pub const Csneg = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: u2 = 0b01,
+ cond: ConditionCode,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010100,
+ S: bool = false,
+ op: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// One concrete conditional-select form, or `.unallocated`.
+ pub const Decoded = union(enum) {
+ unallocated,
+ csel: Csel,
+ csinc: Csinc,
+ csinv: Csinv,
+ csneg: Csneg,
+ };
+ /// S must be 0 and op2 must be 0b00/0b01; op then picks the plain vs
+ /// inverted/negated family and op2 the select vs select-increment variant.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.S) {
+ true => .unallocated,
+ false => switch (inst.group.op) {
+ 0b0 => switch (inst.group.op2) {
+ 0b10...0b11 => .unallocated,
+ 0b00 => .{ .csel = inst.csel },
+ 0b01 => .{ .csinc = inst.csinc },
+ },
+ 0b1 => switch (inst.group.op2) {
+ 0b10...0b11 => .unallocated,
+ 0b00 => .{ .csinv = inst.csinv },
+ 0b01 => .{ .csneg = inst.csneg },
+ },
+ },
+ };
+ }
+ };
+
+ /// Data-processing (3 source)
+ /// Multiply-add family. The Group views bits 23:21 as a single `op31`
+ /// field; the widening/high variants re-view the same bits as op21 + U
+ /// (U distinguishes the signed from the unsigned forms).
+ pub const DataProcessingThreeSource = packed union {
+ group: @This().Group,
+ madd: Madd,
+ msub: Msub,
+ smaddl: Smaddl,
+ smsubl: Smsubl,
+ smulh: Smulh,
+ umaddl: Umaddl,
+ umsubl: Umsubl,
+ umulh: Umulh,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp,
+ Rm: Register.Encoded,
+ op31: u3,
+ decoded24: u5 = 0b11011,
+ op54: u2,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.218 MADD
+ pub const Madd = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ op31: u3 = 0b000,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.231 MSUB
+ pub const Msub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .sub,
+ Rm: Register.Encoded,
+ op31: u3 = 0b000,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.282 SMADDL
+ pub const Smaddl = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ op21: u2 = 0b01,
+ U: bool = false,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ // Widening forms only exist with a 64-bit destination.
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// C6.2.287 SMSUBL
+ pub const Smsubl = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .sub,
+ Rm: Register.Encoded,
+ op21: u2 = 0b01,
+ U: bool = false,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// C6.2.288 SMULH
+ pub const Smulh = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ // Ra is fixed to 0b11111 (no accumulator for the high-half multiply).
+ Ra: Register.Encoded = @enumFromInt(0b11111),
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ op21: u2 = 0b10,
+ U: bool = false,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// C6.2.389 UMADDL
+ pub const Umaddl = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ op21: u2 = 0b01,
+ U: bool = true,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// C6.2.391 UMSUBL
+ pub const Umsubl = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .sub,
+ Rm: Register.Encoded,
+ op21: u2 = 0b01,
+ U: bool = true,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// C6.2.392 UMULH
+ pub const Umulh = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded = @enumFromInt(0b11111),
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ op21: u2 = 0b10,
+ U: bool = true,
+ decoded24: u5 = 0b11011,
+ op54: u2 = 0b00,
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// One concrete 3-source form, or `.unallocated`.
+ pub const Decoded = union(enum) {
+ unallocated,
+ madd: Madd,
+ msub: Msub,
+ smaddl: Smaddl,
+ smsubl: Smsubl,
+ smulh: Smulh,
+ umaddl: Umaddl,
+ umsubl: Umsubl,
+ umulh: Umulh,
+ };
+ /// op54 must be 0b00. op31 selects the family; the widening/high-half
+ /// forms additionally require sf == .doubleword, and the *MULH forms have
+ /// no subtract variant.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op54) {
+ 0b01, 0b10...0b11 => .unallocated,
+ 0b00 => switch (inst.group.op31) {
+ 0b011, 0b100, 0b111 => .unallocated,
+ 0b000 => switch (inst.group.o0) {
+ .add => .{ .madd = inst.madd },
+ .sub => .{ .msub = inst.msub },
+ },
+ 0b001 => switch (inst.group.sf) {
+ .word => .unallocated,
+ .doubleword => switch (inst.group.o0) {
+ .add => .{ .smaddl = inst.smaddl },
+ .sub => .{ .smsubl = inst.smsubl },
+ },
+ },
+ 0b010 => switch (inst.group.sf) {
+ .word => .unallocated,
+ .doubleword => switch (inst.group.o0) {
+ .add => .{ .smulh = inst.smulh },
+ .sub => .unallocated,
+ },
+ },
+ 0b101 => switch (inst.group.sf) {
+ .word => .unallocated,
+ .doubleword => switch (inst.group.o0) {
+ .add => .{ .umaddl = inst.umaddl },
+ .sub => .{ .umsubl = inst.umsubl },
+ },
+ },
+ 0b110 => switch (inst.group.sf) {
+ .word => .unallocated,
+ .doubleword => switch (inst.group.o0) {
+ .add => .{ .umulh = inst.umulh },
+ .sub => .unallocated,
+ },
+ },
+ },
+ };
+ }
+ };
+
+ /// A shift operation paired with its amount; the enum tag values are the
+ /// 2-bit `shift` field encodings used by the register-operand groups above.
+ pub const Shift = union(enum(u2)) {
+ lsl: Amount = 0b00,
+ lsr: Amount = 0b01,
+ asr: Amount = 0b10,
+ ror: Amount = 0b11,
+
+ /// Just the shift kind (the 2-bit tag), without an amount.
+ pub const Op = @typeInfo(Shift).@"union".tag_type.?;
+ pub const Amount = u6;
+ /// LSL #0, i.e. the operand is used unshifted.
+ pub const none: Shift = .{ .lsl = 0 };
+ };
+
+ /// The four condition flags as a 4-bit field, least-significant bit first (v, c, z, n).
+ pub const Nzcv = packed struct { v: bool, c: bool, z: bool, n: bool };
+
+ /// The instruction-class subgroup a register data-processing encoding
+ /// belongs to, or `.unallocated` for reserved patterns.
+ pub const Decoded = union(enum) {
+ unallocated,
+ data_processing_two_source: DataProcessingTwoSource,
+ data_processing_one_source: DataProcessingOneSource,
+ logical_shifted_register: LogicalShiftedRegister,
+ add_subtract_shifted_register: AddSubtractShiftedRegister,
+ add_subtract_extended_register: AddSubtractExtendedRegister,
+ add_subtract_with_carry: AddSubtractWithCarry,
+ rotate_right_into_flags: RotateRightIntoFlags,
+ evaluate_into_flags: EvaluateIntoFlags,
+ conditional_compare_register: ConditionalCompareRegister,
+ conditional_compare_immediate: ConditionalCompareImmediate,
+ conditional_select: ConditionalSelect,
+ data_processing_three_source: DataProcessingThreeSource,
+ };
+ /// Routes a register data-processing encoding to its subgroup using the
+ /// op0/op1/op2/op3 selector fields (the Arm ARM's encoding table for this
+ /// group). Only classifies; the subgroup's own decode() may still report
+ /// `.unallocated`.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op1) {
+ 0b0 => switch (@as(u1, @truncate(inst.group.op2 >> 3))) {
+ 0b0 => .{ .logical_shifted_register = inst.logical_shifted_register },
+ 0b1 => switch (@as(u1, @truncate(inst.group.op2 >> 0))) {
+ 0b0 => .{ .add_subtract_shifted_register = inst.add_subtract_shifted_register },
+ 0b1 => .{ .add_subtract_extended_register = inst.add_subtract_extended_register },
+ },
+ },
+ 0b1 => switch (inst.group.op2) {
+ 0b0000 => switch (inst.group.op3) {
+ 0b000000 => .{ .add_subtract_with_carry = inst.add_subtract_with_carry },
+ 0b000001, 0b100001 => .{ .rotate_right_into_flags = inst.rotate_right_into_flags },
+ 0b000010, 0b010010, 0b100010, 0b110010 => .{ .evaluate_into_flags = inst.evaluate_into_flags },
+ else => .unallocated,
+ },
+ 0b0010 => switch (@as(u1, @truncate(inst.group.op3 >> 1))) {
+ 0b0 => .{ .conditional_compare_register = inst.conditional_compare_register },
+ 0b1 => .{ .conditional_compare_immediate = inst.conditional_compare_immediate },
+ },
+ 0b0100 => .{ .conditional_select = inst.conditional_select },
+ 0b0110 => switch (inst.group.op0) {
+ 0b0 => .{ .data_processing_two_source = inst.data_processing_two_source },
+ 0b1 => .{ .data_processing_one_source = inst.data_processing_one_source },
+ },
+ 0b1000...0b1111 => .{ .data_processing_three_source = inst.data_processing_three_source },
+ else => .unallocated,
+ },
+ };
+ }
+ };
+
+ /// C4.1.90 Data Processing -- Scalar Floating-Point and Advanced SIMD
+ pub const DataProcessingVector = packed union {
+ group: @This().Group,
+ simd_scalar_pairwise: SimdScalarPairwise,
+ simd_copy: SimdCopy,
+ simd_two_register_miscellaneous: SimdTwoRegisterMiscellaneous,
+ simd_across_lanes: SimdAcrossLanes,
+ simd_three_same: SimdThreeSame,
+ simd_modified_immediate: SimdModifiedImmediate,
+ convert_float_integer: ConvertFloatInteger,
+ float_data_processing_one_source: FloatDataProcessingOneSource,
+ float_compare: FloatCompare,
+ float_immediate: FloatImmediate,
+ float_data_processing_two_source: FloatDataProcessingTwoSource,
+ float_data_processing_three_source: FloatDataProcessingThreeSource,
+
+ /// Table C4-91 Encoding table for the Data Processing -- Scalar Floating-Point and Advanced SIMD group
+ /// Selector-field view only; op0/op1/op2/op3 route to the subgroups below.
+ pub const Group = packed struct {
+ encoded0: u10,
+ op3: u9,
+ op2: u4,
+ op1: u2,
+ decoded25: u3 = 0b111,
+ op0: u4,
+ };
+
+ /// Advanced SIMD scalar pairwise
+ /// Only the ADDP (scalar) form is modeled here.
+ pub const SimdScalarPairwise = packed union {
+ group: @This().Group,
+ addp: Addp,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: u5,
+ decoded17: u5 = 0b11000,
+ size: Size,
+ decoded24: u5 = 0b11110,
+ U: u1,
+ decoded30: u2 = 0b01,
+ };
+
+ /// C7.2.4 ADDP (scalar)
+ pub const Addp = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: u5 = 0b11011,
+ decoded17: u5 = 0b11000,
+ size: Size,
+ decoded24: u5 = 0b11110,
+ U: u1 = 0b0,
+ decoded30: u2 = 0b01,
+ };
+ };
+
+ /// Advanced SIMD copy
+ /// The Group views bits 14:11 as `imm4`; SMOV/UMOV pin those bits via the
+ /// decoded11..decoded13 fields (imm4 = 0101 for SMOV, 0111 for UMOV).
+ pub const SimdCopy = packed union {
+ group: @This().Group,
+ smov: Smov,
+ umov: Umov,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ imm4: u4,
+ decoded15: u1 = 0b0,
+ /// Element size/index selector.
+ imm5: u5,
+ decoded21: u8 = 0b01110000,
+ op: u1,
+ Q: Register.IntegerSize,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.279 SMOV
+ pub const Smov = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ decoded11: u1 = 0b1,
+ decoded12: u1 = 0b0,
+ decoded13: u2 = 0b01,
+ decoded15: u1 = 0b0,
+ imm5: u5,
+ decoded21: u8 = 0b01110000,
+ decoded29: u1 = 0b0,
+ Q: Register.IntegerSize,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.371 UMOV
+ pub const Umov = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ decoded11: u1 = 0b1,
+ decoded12: u1 = 0b1,
+ decoded13: u2 = 0b01,
+ decoded15: u1 = 0b0,
+ imm5: u5,
+ decoded21: u8 = 0b01110000,
+ decoded29: u1 = 0b0,
+ Q: Register.IntegerSize,
+ decoded31: u1 = 0b0,
+ };
+ };
+
+ /// Advanced SIMD two-register miscellaneous
+ /// Only the CNT form is modeled here.
+ pub const SimdTwoRegisterMiscellaneous = packed union {
+ group: @This().Group,
+ cnt: Cnt,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: u5,
+ decoded17: u5 = 0b10000,
+ size: Size,
+ decoded24: u5 = 0b01110,
+ U: u1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.38 CNT
+ pub const Cnt = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: u5 = 0b00101,
+ decoded17: u5 = 0b10000,
+ size: Size,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+ };
+
+ /// Advanced SIMD across lanes
+ /// Only the ADDV form is modeled here.
+ pub const SimdAcrossLanes = packed union {
+ group: @This().Group,
+ addv: Addv,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: u5,
+ decoded17: u5 = 0b11000,
+ size: Size,
+ decoded24: u5 = 0b01110,
+ U: u1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.6 ADDV
+ pub const Addv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: u5 = 0b11011,
+ decoded17: u5 = 0b11000,
+ size: Size,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+ };
+
+ /// Advanced SIMD three same
+ /// Note: for the bitwise forms (AND/BIC/ORR/ORN/EOR) the shared opcode is
+ /// 0b00011 and the `size` field is repurposed as the instruction selector
+ /// (.byte/.half/.single/.double), not as an element size.
+ pub const SimdThreeSame = packed union {
+ group: @This().Group,
+ addp: Addp,
+ @"and": And,
+ bic: Bic,
+ orr: Orr,
+ orn: Orn,
+ eor: Eor,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ opcode: u5,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ size: Size,
+ decoded24: u5 = 0b01110,
+ U: u1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.5 ADDP (vector)
+ pub const Addp = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ opcode: u5 = 0b10111,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ size: Size,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.11 AND (vector)
+ pub const And = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ opcode: u5 = 0b00011,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ // Selector bits, not an element size (see union doc comment).
+ size: Size = .byte,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.21 BIC (vector, register)
+ pub const Bic = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ opcode: u5 = 0b00011,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ size: Size = .half,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.213 ORR (vector, register)
+ pub const Orr = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ opcode: u5 = 0b00011,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ size: Size = .single,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.211 ORN (vector)
+ pub const Orn = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ opcode: u5 = 0b00011,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ size: Size = .double,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.41 EOR (vector)
+ pub const Eor = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u1 = 0b1,
+ opcode: u5 = 0b00011,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ size: Size = .byte,
+ decoded24: u5 = 0b01110,
+ U: u1 = 0b1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+ };
+
+ /// Advanced SIMD modified immediate
+ /// The 8-bit immediate is split across imm3 (abc) and imm5 (defgh); ORR and
+ /// BIC re-view the 4-bit cmode field as cmode0 (fixed 1) plus a 3-bit cmode.
+ pub const SimdModifiedImmediate = packed union {
+ group: @This().Group,
+ movi: Movi,
+ orr: Orr,
+ fmov: Fmov,
+ mvni: Mvni,
+ bic: Bic,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u1 = 0b1,
+ o2: u1,
+ cmode: u4,
+ imm3: u3,
+ decoded19: u10 = 0b0111100000,
+ op: u1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.204 MOVI
+ pub const Movi = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u1 = 0b1,
+ o2: u1 = 0b0,
+ cmode: u4,
+ imm3: u3,
+ decoded19: u10 = 0b0111100000,
+ op: u1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.212 ORR (vector, immediate)
+ pub const Orr = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u1 = 0b1,
+ o2: u1 = 0b0,
+ cmode0: u1 = 0b1,
+ cmode: u3,
+ imm3: u3,
+ decoded19: u10 = 0b0111100000,
+ op: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.129 FMOV (vector, immediate)
+ pub const Fmov = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u1 = 0b1,
+ o2: u1 = 0b1,
+ cmode: u4 = 0b1111,
+ imm3: u3,
+ decoded19: u10 = 0b0111100000,
+ op: u1 = 0b0,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.208 MVNI
+ pub const Mvni = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u1 = 0b1,
+ o2: u1 = 0b0,
+ cmode: u4,
+ imm3: u3,
+ decoded19: u10 = 0b0111100000,
+ op: u1 = 0b1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C7.2.20 BIC (vector, immediate)
+ pub const Bic = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u1 = 0b1,
+ o2: u1 = 0b0,
+ cmode0: u1 = 0b1,
+ cmode: u3,
+ imm3: u3,
+ decoded19: u10 = 0b0111100000,
+ op: u1 = 0b1,
+ Q: Q,
+ decoded31: u1 = 0b0,
+ };
+ };
+
+ /// Conversion between floating-point and integer
+ /// FCVT*/SCVTF/UCVTF/FMOV(general)/FJCVTZS. Each FCVT variant fixes a
+ /// rounding mode via `rmode` and a signedness via `opcode`. Note: the Group
+ /// view names bits 23:22 `ptype` while the variants name them `ftype`;
+ /// they are the same bits.
+ pub const ConvertFloatInteger = packed union {
+ group: @This().Group,
+ fcvtns: Fcvtns,
+ fcvtnu: Fcvtnu,
+ scvtf: Scvtf,
+ ucvtf: Ucvtf,
+ fcvtas: Fcvtas,
+ fcvtau: Fcvtau,
+ fmov: Fmov,
+ fcvtps: Fcvtps,
+ fcvtpu: Fcvtpu,
+ fcvtms: Fcvtms,
+ fcvtmu: Fcvtmu,
+ fcvtzs: Fcvtzs,
+ fcvtzu: Fcvtzu,
+ fjcvtzs: Fjcvtzs,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3,
+ rmode: u2,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.81 FCVTNS (scalar)
+ pub const Fcvtns = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b000,
+ rmode: Rmode = .n,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.83 FCVTNU (scalar)
+ pub const Fcvtnu = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b001,
+ rmode: Rmode = .n,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.236 SCVTF (scalar, integer)
+ pub const Scvtf = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b010,
+ rmode: Rmode = .n,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.355 UCVTF (scalar, integer)
+ pub const Ucvtf = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b011,
+ rmode: Rmode = .n,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.71 FCVTAS (scalar)
+ pub const Fcvtas = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b100,
+ rmode: Rmode = .n,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.73 FCVTAU (scalar)
+ pub const Fcvtau = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b101,
+ rmode: Rmode = .n,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.131 FMOV (general)
+ /// Uses its own opcode/rmode enums: `opcode` picks the transfer
+ /// direction, and FMOV's rmode values differ from the rounding-mode enum
+ /// used by the FCVT forms.
+ pub const Fmov = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: Opcode,
+ rmode: Fmov.Rmode,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+
+ pub const Opcode = enum(u3) {
+ float_to_integer = 0b110,
+ integer_to_float = 0b111,
+ _,
+ };
+
+ pub const Rmode = enum(u2) {
+ @"0" = 0b00,
+ @"1" = 0b01,
+ _,
+ };
+ };
+
+ /// C7.2.85 FCVTPS (scalar)
+ pub const Fcvtps = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b000,
+ rmode: Rmode = .p,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.87 FCVTPU (scalar)
+ pub const Fcvtpu = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b001,
+ rmode: Rmode = .p,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.76 FCVTMS (scalar)
+ pub const Fcvtms = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b000,
+ rmode: Rmode = .m,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.78 FCVTMU (scalar)
+ pub const Fcvtmu = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b001,
+ rmode: Rmode = .m,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.92 FCVTZS (scalar, integer)
+ pub const Fcvtzs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b000,
+ rmode: Rmode = .z,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.96 FCVTZU (scalar, integer)
+ pub const Fcvtzu = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b001,
+ rmode: Rmode = .z,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C7.2.99 FJCVTZS
+ /// JavaScript convert: fixed double source and 32-bit destination.
+ pub const Fjcvtzs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u6 = 0b000000,
+ opcode: u3 = 0b110,
+ rmode: Rmode = .z,
+ decoded21: u1 = 0b1,
+ ftype: Ftype = .double,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize = .word,
+ };
+
+ /// Rounding mode selected by bits 20:19 for the FCVT forms.
+ pub const Rmode = enum(u2) {
+ /// to nearest
+ n = 0b00,
+ /// toward plus infinity
+ p = 0b01,
+ /// toward minus infinity
+ m = 0b10,
+ /// toward zero
+ z = 0b11,
+ };
+ };
+
+ /// Floating-point data-processing (1 source)
+ // Packed union over one 32-bit encoding: `group` exposes the raw 6-bit
+ // opcode (bits 15-20); the named variants split that same span either as
+ // opc(u2)+decoded17(u4) (FMOV/FABS/FNEG/FSQRT/FCVT) or as rmode(u3)+
+ // decoded18(u3) (the FRINT* family). Zig packed structs are LSB-first.
+ pub const FloatDataProcessingOneSource = packed union {
+ group: @This().Group,
+ fmov: Fmov,
+ fabs: Fabs,
+ fneg: Fneg,
+ fsqrt: Fsqrt,
+ fcvt: Fcvt,
+ frintn: Frintn,
+ frintp: Frintp,
+ frintm: Frintm,
+ frintz: Frintz,
+ frinta: Frinta,
+ frintx: Frintx,
+ frinti: Frinti,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ opcode: u6,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.130 FMOV (register)
+ pub const Fmov = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ opc: u2 = 0b00,
+ decoded17: u4 = 0b0000,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.46 FABS (scalar)
+ pub const Fabs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ opc: u2 = 0b01,
+ decoded17: u4 = 0b0000,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.140 FNEG (scalar)
+ pub const Fneg = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ opc: u2 = 0b10,
+ decoded17: u4 = 0b0000,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.172 FSQRT (scalar)
+ pub const Fsqrt = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ opc: u2 = 0b11,
+ decoded17: u4 = 0b0001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.69 FCVT
+ // Here `opc` (bits 15-16) encodes the *destination* type while `ftype`
+ // encodes the source type, which is why both are Ftype.
+ pub const Fcvt = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ opc: Ftype,
+ decoded17: u4 = 0b0001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.162 FRINTN (scalar)
+ pub const Frintn = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ rmode: Rmode = .n,
+ decoded18: u3 = 0b001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.164 FRINTP (scalar)
+ pub const Frintp = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ rmode: Rmode = .p,
+ decoded18: u3 = 0b001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.160 FRINTM (scalar)
+ pub const Frintm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ rmode: Rmode = .m,
+ decoded18: u3 = 0b001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.168 FRINTZ (scalar)
+ pub const Frintz = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ rmode: Rmode = .z,
+ decoded18: u3 = 0b001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.156 FRINTA (scalar)
+ pub const Frinta = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ rmode: Rmode = .a,
+ decoded18: u3 = 0b001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.166 FRINTX (scalar)
+ pub const Frintx = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ rmode: Rmode = .x,
+ decoded18: u3 = 0b001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.158 FRINTI (scalar)
+ pub const Frinti = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u5 = 0b10000,
+ rmode: Rmode = .i,
+ decoded18: u3 = 0b001,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ // FRINT rounding selector (bits 15-17). Non-exhaustive: 0b101 is not a
+ // named mode here, hence the trailing `_`.
+ pub const Rmode = enum(u3) {
+ /// to nearest with ties to even
+ n = 0b000,
+ /// toward plus infinity
+ p = 0b001,
+ /// toward minus infinity
+ m = 0b010,
+ /// toward zero
+ z = 0b011,
+ /// to nearest with ties to away
+ a = 0b100,
+ /// exact, using current rounding mode
+ x = 0b110,
+ /// using current rounding mode
+ i = 0b111,
+ _,
+ };
+ };
+
+ /// Floating-point compare
+ // FCMP vs FCMPE differ only in opc1 (bit 4): 0 = FCMP, 1 = FCMPE
+ // (signaling). Opc0 (bit 3) selects whether the comparand is Rm or +0.0.
+ pub const FloatCompare = packed union {
+ group: @This().Group,
+ fcmp: Fcmp,
+ fcmpe: Fcmpe,
+
+ pub const Group = packed struct {
+ opcode2: u5,
+ Rn: Register.Encoded,
+ decoded10: u4 = 0b1000,
+ op: u2,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.66 FCMP
+ pub const Fcmp = packed struct {
+ decoded0: u3 = 0b000,
+ opc0: Opc0,
+ opc1: u1 = 0b0,
+ Rn: Register.Encoded,
+ decoded10: u4 = 0b1000,
+ op: u2 = 0b00,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.67 FCMPE
+ pub const Fcmpe = packed struct {
+ decoded0: u3 = 0b000,
+ opc0: Opc0,
+ opc1: u1 = 0b1,
+ Rn: Register.Encoded,
+ decoded10: u4 = 0b1000,
+ op: u2 = 0b00,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ // Single-bit selector; the two-digit literals still fit in u1 (0 and 1).
+ // `.zero` means "compare Rn against +0.0" (Rm field then unused).
+ pub const Opc0 = enum(u1) {
+ register = 0b00,
+ zero = 0b01,
+ };
+ };
+
+ /// Floating-point immediate
+ // imm8 carries the 8-bit encoded floating-point constant; for FMOV
+ // (scalar, immediate) the imm5 field must be zero, which the default pins.
+ pub const FloatImmediate = packed union {
+ group: @This().Group,
+ fmov: Fmov,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u3 = 0b100,
+ imm8: u8,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.132 FMOV (scalar, immediate)
+ pub const Fmov = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5 = 0b00000,
+ decoded10: u3 = 0b100,
+ imm8: u8,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+ };
+
+ /// Floating-point data-processing (2 source)
+ // All variants share one layout; only the 4-bit Opcode default (bits
+ // 12-15) distinguishes them, so each named struct is Group with the
+ // operation pre-selected.
+ pub const FloatDataProcessingTwoSource = packed union {
+ group: @This().Group,
+ fmul: Fmul,
+ fdiv: Fdiv,
+ fadd: Fadd,
+ fsub: Fsub,
+ fmax: Fmax,
+ fmin: Fmin,
+ fmaxnm: Fmaxnm,
+ fminnm: Fminnm,
+ fnmul: Fnmul,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.136 FMUL (scalar)
+ pub const Fmul = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmul,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.98 FDIV (scalar)
+ pub const Fdiv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fdiv,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.50 FADD (scalar)
+ pub const Fadd = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fadd,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.174 FSUB (scalar)
+ pub const Fsub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fsub,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.102 FMAX (scalar)
+ pub const Fmax = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmax,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.112 FMIN (scalar)
+ pub const Fmin = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmin,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.104 FMAXNM (scalar)
+ pub const Fmaxnm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmaxnm,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.114 FMINNM (scalar)
+ pub const Fminnm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fminnm,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.143 FNMUL (scalar)
+ pub const Fnmul = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fnmul,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ // Non-exhaustive: encodings above 0b1000 are unallocated in this group.
+ pub const Opcode = enum(u4) {
+ fmul = 0b0000,
+ fdiv = 0b0001,
+ fadd = 0b0010,
+ fsub = 0b0011,
+ fmax = 0b0100,
+ fmin = 0b0101,
+ fmaxnm = 0b0110,
+ fminnm = 0b0111,
+ fnmul = 0b1000,
+ _,
+ };
+ };
+
+ /// Floating-point data-processing (3 source)
+ // Fused multiply-add family: o0 selects add vs subtract of the product,
+ // o1 selects the plain (FM*) vs negated (FNM*) forms, giving the four
+ // FMADD/FMSUB/FNMADD/FNMSUB combinations below.
+ pub const FloatDataProcessingThreeSource = packed union {
+ group: @This().Group,
+ fmadd: Fmadd,
+ fmsub: Fmsub,
+ fnmadd: Fnmadd,
+ fnmsub: Fnmsub,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp,
+ Rm: Register.Encoded,
+ o1: u1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.100 FMADD
+ pub const Fmadd = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ o1: O1 = .fm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.133 FMSUB
+ pub const Fmsub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .sub,
+ Rm: Register.Encoded,
+ o1: O1 = .fm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.141 FNMADD
+ pub const Fnmadd = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ o1: O1 = .fnm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.142 FNMSUB
+ pub const Fnmsub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .sub,
+ Rm: Register.Encoded,
+ o1: O1 = .fnm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ pub const O1 = enum(u1) {
+ fm = 0b0,
+ fnm = 0b1,
+ };
+ };
+
+ // Q bit: selects the 64-bit (`double`) or 128-bit (`quad`) vector form.
+ pub const Q = enum(u1) {
+ double = 0b0,
+ quad = 0b1,
+ };
+
+ // SIMD element-size field. The two helpers below are identity mappings
+ // between this encoding enum and Register.VectorSize, which share the
+ // same four member names.
+ pub const Size = enum(u2) {
+ byte = 0b00,
+ half = 0b01,
+ single = 0b10,
+ double = 0b11,
+
+ /// Convert an encoded element size to the register-level vector size.
+ pub fn toVectorSize(s: Size) Register.VectorSize {
+ return switch (s) {
+ .byte => .byte,
+ .half => .half,
+ .single => .single,
+ .double => .double,
+ };
+ }
+
+ /// Convert a register-level vector size to the encoded element size.
+ pub fn fromVectorSize(vs: Register.VectorSize) Size {
+ return switch (vs) {
+ .byte => .byte,
+ .half => .half,
+ .single => .single,
+ .double => .double,
+ };
+ }
+ };
+
+ // Scalar FP type field (bits 22-23 in the structs above). Note the values
+ // are not in width order: half precision is 0b11, not 0b10.
+ pub const Ftype = enum(u2) {
+ single = 0b00,
+ double = 0b01,
+ quad = 0b10,
+ half = 0b11,
+ };
+ };
+ };
+
+ // Shared single-bit add/subtract selector used by several encoding groups.
+ pub const AddSubtractOp = enum(u1) {
+ add = 0b0,
+ sub = 0b1,
+ };
+
+ // Shared 2-bit logical-operation selector (AND/ORR/EOR/ANDS).
+ pub const LogicalOpc = enum(u2) {
+ @"and" = 0b00,
+ orr = 0b01,
+ eor = 0b10,
+ ands = 0b11,
+ };
+
+ /// Result of top-level instruction decoding: one variant per A64
+ /// encoding group, plus `unallocated` for undefined op1 values.
+ pub const Decoded = union(enum) {
+ unallocated,
+ reserved: Reserved,
+ sme: Sme,
+ sve: Sve,
+ data_processing_immediate: DataProcessingImmediate,
+ branch_exception_generating_system: BranchExceptionGeneratingSystem,
+ load_store: LoadStore,
+ data_processing_register: DataProcessingRegister,
+ data_processing_vector: DataProcessingVector,
+ };
+ /// Classify an instruction into its top-level A64 encoding group by
+ /// switching on the group op1 bits (with op0 disambiguating the
+ /// reserved vs SME space when op1 == 0b0000).
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op1) {
+ 0b0000 => switch (inst.group.op0) {
+ 0b0 => .{ .reserved = inst.reserved },
+ 0b1 => .{ .sme = inst.sme },
+ },
+ 0b0001 => .unallocated,
+ 0b0010 => .{ .sve = inst.sve },
+ 0b0011 => .unallocated,
+ 0b1000, 0b1001 => .{ .data_processing_immediate = inst.data_processing_immediate },
+ 0b1010, 0b1011 => .{ .branch_exception_generating_system = inst.branch_exception_generating_system },
+ 0b0100, 0b0110, 0b1100, 0b1110 => .{ .load_store = inst.load_store },
+ 0b0101, 0b1101 => .{ .data_processing_register = inst.data_processing_register },
+ 0b0111, 0b1111 => .{ .data_processing_vector = inst.data_processing_vector },
+ };
+ }
+
+ /// C6.2.1 ADC
+ // Add with carry. All three registers must share the same integer width
+ // (enforced by the assert); `sf` encodes word vs doubleword.
+ pub fn adc(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .adc = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.2 ADCS
+ // Flag-setting variant of ADC; identical operand rules.
+ pub fn adcs(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .adcs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.3 ADD (extended register)
+ /// C6.2.4 ADD (immediate)
+ /// C6.2.5 ADD (shifted register)
+ // The labeled switch canonicalizes convenience forms into the three
+ // explicit encodings via `continue :form`: `.immediate` ->
+ // `.shifted_immediate`, `.register`/`.extended_register`/`.shifted_register`
+ // -> their `_explicit` counterparts.
+ pub fn add(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .add = .{
+ // `.sp = true` presumably permits encoding SP (x31) for
+ // Rd/Rn in this form -- TODO confirm against encode().
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // Extended-register left shift is limited to 0..4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .add = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ // When SP participates, only the extended-register encoding can
+ // represent it, so a plain register form is routed there with a
+ // zero-amount UXTW/UXTX; otherwise use the shifted-register form.
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .add = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // Shift amount must fit the register width (u5 for word).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ // ROR is not a valid shift for add/subtract.
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C7.2.4 ADDP (scalar)
+ /// C7.2.5 ADDP (vector)
+ // Scalar form pairwise-adds the two lanes of a .2d source into a D
+ // register; vector form requires matching arrangements on all three
+ // operands (any arrangement except .1d).
+ pub fn addp(d: Register, n: Register, form: union(enum) {
+ scalar,
+ vector: Register,
+ }) Instruction {
+ switch (form) {
+ .scalar => {
+ assert(d.format.scalar == .double and n.format.vector == .@"2d");
+ return .{ .data_processing_vector = .{ .simd_scalar_pairwise = .{
+ .addp = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .size = .double,
+ },
+ } } };
+ },
+ .vector => |m| {
+ const arrangement = d.format.vector;
+ assert(arrangement != .@"1d" and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .addp = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .size = arrangement.elemSize(),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.7 ADDS (extended register)
+ /// C6.2.8 ADDS (immediate)
+ /// C6.2.9 ADDS (shifted register)
+ // Flag-setting ADD. Mirrors add() above, except Rd is encoded without
+ // the SP option (flag-setting forms target ZR rather than SP) while Rn
+ // still allows SP in the extended/immediate forms.
+ pub fn adds(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .adds = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // Extended-register left shift is limited to 0..4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .adds = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ // Route through the extended-register form when SP is involved,
+ // since the shifted-register encoding cannot express SP.
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .adds = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ // ROR is not a valid shift for add/subtract.
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C7.2.6 ADDV
+ // Across-lanes add: destination is a scalar of the source element size;
+ // arrangements with fewer than 3 lanes (.2s/.2d/.1d) are rejected.
+ pub fn addv(d: Register, n: Register) Instruction {
+ const arrangement = n.format.vector;
+ assert(arrangement.len() > 2 and d.format.scalar == arrangement.elemSize().toVectorSize());
+ return .{ .data_processing_vector = .{ .simd_across_lanes = .{
+ .addv = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .size = arrangement.elemSize(),
+ .Q = arrangement.size(),
+ },
+ } } };
+ }
+ /// C6.2.10 ADR
+ // PC-relative address of `label` bytes away (±1MiB, i21). The offset is
+ // split into immlo (low 2 bits) and immhi (remaining 19 bits).
+ pub fn adr(d: Register, label: i21) Instruction {
+ assert(d.format.integer == .doubleword);
+ return .{ .data_processing_immediate = .{ .pc_relative_addressing = .{
+ .adr = .{
+ .Rd = d.alias.encode(.{}),
+ .immhi = @intCast(label >> 2),
+ .immlo = @truncate(@as(u21, @bitCast(label))),
+ },
+ } } };
+ }
+ /// C6.2.11 ADRP
+ // Page-relative form: `label` must be a multiple of 4096
+ // (@shrExact asserts the low 12 bits are zero), giving a ±4GiB range.
+ pub fn adrp(d: Register, label: i33) Instruction {
+ assert(d.format.integer == .doubleword);
+ const imm: i21 = @intCast(@shrExact(label, 12));
+ return .{ .data_processing_immediate = .{ .pc_relative_addressing = .{
+ .adrp = .{
+ .Rd = d.alias.encode(.{}),
+ .immhi = @intCast(imm >> 2),
+ .immlo = @truncate(@as(u21, @bitCast(imm))),
+ },
+ } } };
+ }
+ /// C6.2.12 AND (immediate)
+ /// C6.2.13 AND (shifted register)
+ /// C7.2.11 AND (vector)
+ // Dispatches on the destination format: integer registers take the
+ // immediate/shifted-register encodings; vector registers take the SIMD
+ // three-same encoding (register form only, byte element size).
+ pub fn @"and"(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .immediate => |bitmask| {
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .@"and" = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .@"and" = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ // Logical ops allow all four shifts, including ROR.
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| {
+ const m = form.register;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .@"and" = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.14 ANDS (immediate)
+ /// C6.2.15 ANDS (shifted register)
+ // Flag-setting AND; integer registers only. Unlike @"and", Rd is encoded
+ // without the SP option in the immediate form.
+ pub fn ands(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .immediate => |bitmask| {
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .ands = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .ands = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ // Logical ops allow all four shifts, including ROR.
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.18 ASRV
+ // Arithmetic shift right by a register-held amount; all operands share
+ // the same integer width.
+ pub fn asrv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .asrv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.25 B
+ // Unconditional branch; `label` is a byte offset that must be 4-byte
+ // aligned (@shrExact asserts this), range ±128MiB.
+ pub fn b(label: i28) Instruction {
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_immediate = .{
+ .b = .{ .imm26 = @intCast(@shrExact(label, 2)) },
+ } } };
+ }
+ /// C6.2.26 B.cond
+ // Conditional branch; 4-byte-aligned byte offset, range ±1MiB.
+ pub fn @"b."(cond: ConditionCode, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .conditional_branch_immediate = .{
+ .b = .{
+ .cond = cond,
+ .imm19 = @intCast(@shrExact(label, 2)),
+ },
+ } } };
+ }
+ /// C6.2.27 BC.cond
+ // Consistent-branch variant of B.cond; same operands and range.
+ pub fn @"bc."(cond: ConditionCode, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .conditional_branch_immediate = .{
+ .bc = .{
+ .cond = cond,
+ .imm19 = @intCast(@shrExact(label, 2)),
+ },
+ } } };
+ }
+ /// C6.2.30 BFM
+ // Bitfield move; the immr/imms pair travels as a Bitmask, validated for
+ // the register width by validBitfield.
+ pub fn bfm(d: Register, n: Register, bitmask: DataProcessingImmediate.Bitmask) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and bitmask.validBitfield(sf));
+ return .{ .data_processing_immediate = .{ .bitfield = .{
+ .bfm = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.32 BIC (shifted register)
+ /// C7.2.20 BIC (vector, immediate)
+ /// C7.2.21 BIC (vector, register)
+ // Integer destinations accept only the (shifted-)register forms; vector
+ // destinations accept a shifted 8-bit immediate or a register operand.
+ pub fn bic(d: Register, n: Register, form: union(enum) {
+ shifted_immediate: struct { immediate: u8, lsl: u5 = 0 },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ else => unreachable,
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .bic = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| switch (form) {
+ else => unreachable,
+ .shifted_immediate => |shifted_immediate| {
+ // Read-modify-write: destination doubles as the source.
+ assert(n.alias == d.alias and n.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+ .bic = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .imm5 = @truncate(shifted_immediate.immediate >> 0),
+ // The shift amount (a multiple of 8; @shrExact asserts
+ // this) is folded into the cmode bits; halfword
+ // arrangements allow lsl 0/8, word 0/8/16/24.
+ .cmode = switch (arrangement) {
+ else => unreachable,
+ .@"4h", .@"8h" => @as(u3, 0b100) |
+ @as(u3, @as(u1, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ .@"2s", .@"4s" => @as(u3, 0b000) |
+ @as(u3, @as(u2, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ },
+ .imm3 = @intCast(shifted_immediate.immediate >> 5),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ .register => |m| {
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .bic = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ },
+ }
+ }
+ /// C6.2.33 BICS (shifted register)
+ // Flag-setting bit clear; register forms only, integer registers only.
+ pub fn bics(d: Register, n: Register, form: union(enum) {
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .bics = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.34 BL
+ /// Branch with link to a PC-relative byte offset; `label` must be a
+ /// multiple of 4 (@shrExact traps otherwise).
+ pub fn bl(label: i28) Instruction {
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_immediate = .{
+ .bl = .{ .imm26 = @intCast(@shrExact(label, 2)) },
+ } } };
+ }
+ /// C6.2.35 BLR
+ /// Branch with link to register; the target must be a 64-bit (X) register.
+ pub fn blr(n: Register) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_register = .{
+ .blr = .{ .Rn = n.alias.encode(.{}) },
+ } } };
+ }
+ /// C6.2.37 BR
+ /// Branch to register; the target must be a 64-bit (X) register.
+ pub fn br(n: Register) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_register = .{
+ .br = .{ .Rn = n.alias.encode(.{}) },
+ } } };
+ }
+ /// C6.2.40 BRK
+ /// Breakpoint exception; `imm` is the 16-bit comment field passed to the debugger.
+ pub fn brk(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .brk = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.46 CBNZ
+ /// Compare and branch if nonzero; `label` is a PC-relative byte offset
+ /// that must be 4-byte aligned (@shrExact traps otherwise).
+ pub fn cbnz(t: Register, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .compare_branch_immediate = .{
+ .cbnz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm19 = @intCast(@shrExact(label, 2)),
+ .sf = t.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.47 CBZ
+ /// Compare and branch if zero; `label` is a PC-relative byte offset
+ /// that must be 4-byte aligned (@shrExact traps otherwise).
+ pub fn cbz(t: Register, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .compare_branch_immediate = .{
+ .cbz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm19 = @intCast(@shrExact(label, 2)),
+ .sf = t.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.48 CCMN (immediate)
+ /// C6.2.49 CCMN (register)
+ /// Conditional compare negative: if `cond` holds, sets flags from n + operand;
+ /// otherwise sets flags directly to `nzcv`. Second operand is a same-width
+ /// register or a 5-bit unsigned immediate.
+ pub fn ccmn(
+ n: Register,
+ form: union(enum) { register: Register, immediate: u5 },
+ nzcv: DataProcessingRegister.Nzcv,
+ cond: ConditionCode,
+ ) Instruction {
+ const sf = n.format.integer;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_compare_register = .{
+ .ccmn = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .immediate => |imm| return .{ .data_processing_register = .{ .conditional_compare_immediate = .{
+ .ccmn = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .imm5 = imm,
+ .sf = sf,
+ },
+ } } },
+ }
+ }
+ /// C6.2.50 CCMP (immediate)
+ /// C6.2.51 CCMP (register)
+ /// Conditional compare: if `cond` holds, sets flags from n - operand;
+ /// otherwise sets flags directly to `nzcv`. Mirrors `ccmn` above.
+ pub fn ccmp(
+ n: Register,
+ form: union(enum) { register: Register, immediate: u5 },
+ nzcv: DataProcessingRegister.Nzcv,
+ cond: ConditionCode,
+ ) Instruction {
+ const sf = n.format.integer;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_compare_register = .{
+ .ccmp = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .immediate => |imm| return .{ .data_processing_register = .{ .conditional_compare_immediate = .{
+ .ccmp = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .imm5 = imm,
+ .sf = sf,
+ },
+ } } },
+ }
+ }
+ /// C6.2.56 CLREX
+ /// Clear exclusive monitor; `imm` fills the CRm field (ignored by current cores).
+ pub fn clrex(imm: u4) Instruction {
+ return .{ .branch_exception_generating_system = .{ .barriers = .{
+ .clrex = .{
+ .CRm = imm,
+ },
+ } } };
+ }
+ /// C6.2.58 CLZ
+ /// Count leading zeros of n into d; both registers must share the same width.
+ pub fn clz(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .clz = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.38 CNT
+ /// Vector population count per byte element; only byte arrangements are
+ /// valid, and source/destination arrangements must match (asserted).
+ pub fn cnt(d: Register, n: Register) Instruction {
+ const arrangement = d.format.vector;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_two_register_miscellaneous = .{
+ .cnt = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .size = arrangement.elemSize(),
+ .Q = arrangement.size(),
+ },
+ } } };
+ }
+ /// C6.2.103 CSEL
+ /// Conditional select: d = cond ? n : m. All registers must share one width.
+ pub fn csel(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csel = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.106 CSINC
+ /// Conditional select increment: d = cond ? n : m + 1.
+ pub fn csinc(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csinc = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.107 CSINV
+ /// Conditional select invert: d = cond ? n : NOT m.
+ pub fn csinv(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csinv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.108 CSNEG
+ /// Conditional select negate: d = cond ? n : -m.
+ pub fn csneg(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csneg = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.110 DCPS1
+ /// Debug change PE state to EL1; `imm` fills the 16-bit payload field.
+ pub fn dcps1(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .dcps1 = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.111 DCPS2
+ /// Debug change PE state to EL2; `imm` fills the 16-bit payload field.
+ pub fn dcps2(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .dcps2 = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.112 DCPS3
+ /// Debug change PE state to EL3; `imm` fills the 16-bit payload field.
+ pub fn dcps3(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .dcps3 = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.116 DSB
+ /// Data synchronization barrier with the given shareability/access option.
+ pub fn dsb(option: BranchExceptionGeneratingSystem.Barriers.Option) Instruction {
+ return .{ .branch_exception_generating_system = .{ .barriers = .{
+ .dsb = .{
+ .CRm = option,
+ },
+ } } };
+ }
+ /// C6.2.118 EON (shifted register)
+ /// Exclusive OR NOT: d = n XOR NOT(shifted m). The `.register` and
+ /// `.shifted_register` forms normalize to `.shifted_register_explicit`
+ /// via the labeled-switch `continue :form`.
+ pub fn eon(d: Register, n: Register, form: union(enum) {
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .eon = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only encodes a 5-bit shift amount (@intCast traps on > 31).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.119 EOR (immediate)
+ /// C6.2.120 EOR (shifted register)
+ /// C7.2.41 EOR (vector)
+ /// Exclusive OR. Integer destinations accept bitmask-immediate and
+ /// (shifted-)register forms; vector destinations accept only `.register`.
+ pub fn eor(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .immediate => |bitmask| {
+ // The bitmask must be encodable for this operand width.
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .eor = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .eor = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only encodes a 5-bit shift amount (@intCast traps on > 31).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| {
+ // Accessing `.register` directly asserts the active union tag;
+ // any other form with a vector destination is a checked panic.
+ const m = form.register;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .eor = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.124 EXTR
+ /// Extract a register-width field starting at bit `lsb` from the
+ /// concatenation n:m into d. All registers must share one width;
+ /// for 32-bit operands `lsb` must be < 32 (@intCast traps otherwise).
+ pub fn extr(d: Register, n: Register, m: Register, lsb: u6) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_immediate = .{ .extract = .{
+ .extr = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imms = switch (sf) {
+ .word => @as(u5, @intCast(lsb)),
+ .doubleword => @as(u6, @intCast(lsb)),
+ },
+ .Rm = m.alias.encode(.{}),
+ // The N bit mirrors sf in this encoding.
+ .N = sf,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.46 FABS (scalar)
+ /// Floating-point absolute value; source and destination must share
+ /// the same scalar precision (half/single/double).
+ pub fn fabs(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fabs = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.50 FADD (scalar)
+ /// Floating-point add: d = n + m; all three must share one scalar precision.
+ pub fn fadd(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fadd = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.66 FCMP
+ /// Floating-point compare n against a register or +0.0, setting NZCV.
+ /// The zero form encodes Rm as 0b00000.
+ /// NOTE(review): `fcmp` sets `.opc0 = .register` in both forms while
+ /// `fcmpe` sets `.opc0 = .zero` in both — this suggests `opc0` selects
+ /// FCMP vs FCMPE rather than the operand form; confirm against the
+ /// field definition and C7.2.66/C7.2.67.
+ pub fn fcmp(n: Register, form: union(enum) { register: Register, zero }) Instruction {
+ const ftype = n.format.scalar;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmp = .{
+ .opc0 = .register,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ },
+ .zero => return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmp = .{
+ .opc0 = .register,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = @enumFromInt(0b00000),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } },
+ }
+ }
+ /// C7.2.67 FCMPE
+ /// Floating-point compare (signaling on quiet NaNs) against a register
+ /// or +0.0, setting NZCV. Mirrors `fcmp`; see the `opc0` note there.
+ pub fn fcmpe(n: Register, form: union(enum) { register: Register, zero }) Instruction {
+ const ftype = n.format.scalar;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmpe = .{
+ .opc0 = .zero,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ },
+ .zero => return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmpe = .{
+ .opc0 = .zero,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = @enumFromInt(0b00000),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } },
+ }
+ }
+ /// C7.2.69 FCVT
+ /// Floating-point precision conversion; source and destination must be
+ /// different scalar precisions (asserted). `opc` selects the destination
+ /// precision, `ftype` the source precision.
+ pub fn fcvt(d: Register, n: Register) Instruction {
+ assert(d.format.scalar != n.format.scalar);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fcvt = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .opc = switch (d.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.71 FCVTAS (scalar)
+ /// Float to signed integer, rounding to nearest with ties away from zero.
+ /// `d` is an integer register, `n` a scalar float register.
+ pub fn fcvtas(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtas = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.73 FCVTAU (scalar)
+ /// Float to unsigned integer, rounding to nearest with ties away from zero.
+ pub fn fcvtau(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtau = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.76 FCVTMS (scalar)
+ /// Float to signed integer, rounding toward minus infinity.
+ pub fn fcvtms(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtms = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.78 FCVTMU (scalar)
+ /// Float to unsigned integer, rounding toward minus infinity.
+ pub fn fcvtmu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtmu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.81 FCVTNS (scalar)
+ /// Float to signed integer, rounding to nearest with ties to even.
+ pub fn fcvtns(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtns = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.83 FCVTNU (scalar)
+ /// Float to unsigned integer, rounding to nearest with ties to even.
+ pub fn fcvtnu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtnu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.85 FCVTPS (scalar)
+ /// Float to signed integer, rounding toward plus infinity.
+ pub fn fcvtps(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtps = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.87 FCVTPU (scalar)
+ /// Float to unsigned integer, rounding toward plus infinity.
+ pub fn fcvtpu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtpu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.92 FCVTZS (scalar, integer)
+ /// Float to signed integer, rounding toward zero (truncation).
+ pub fn fcvtzs(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtzs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.96 FCVTZU (scalar, integer)
+ /// Float to unsigned integer, rounding toward zero (truncation).
+ pub fn fcvtzu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtzu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.98 FDIV (scalar)
+ /// Floating-point divide: d = n / m; all three must share one scalar precision.
+ pub fn fdiv(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fdiv = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.99 FJCVTZS
+ /// JavaScript convert: double to 32-bit signed integer with JS overflow
+ /// semantics. Requires a W destination and a D source (both asserted).
+ pub fn fjcvtzs(d: Register, n: Register) Instruction {
+ assert(d.format.integer == .word);
+ assert(n.format.scalar == .double);
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fjcvtzs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ },
+ } } };
+ }
+ /// C7.2.100 FMADD
+ /// Fused multiply-add: d = a + n * m; all four registers must share one
+ /// scalar precision (asserted).
+ pub fn fmadd(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fmadd = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.102 FMAX (scalar)
+ /// Floating-point maximum of n and m.
+ pub fn fmax(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmax = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.104 FMAXNM (scalar)
+ /// Floating-point maximum with IEEE 754 maxNum NaN handling.
+ pub fn fmaxnm(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmaxnm = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.112 FMIN (scalar)
+ /// Floating-point minimum of n and m.
+ pub fn fmin(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmin = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.114 FMINNM (scalar)
+ /// Floating-point minimum with IEEE 754 minNum NaN handling.
+ pub fn fminnm(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fminnm = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.129 FMOV (vector, immediate)
+ /// C7.2.130 FMOV (register)
+ /// C7.2.131 FMOV (general)
+ /// C7.2.132 FMOV (scalar, immediate)
+ /// Floating-point move. Dispatches on destination/source formats:
+ /// immediate to scalar or vector, general <-> scalar, scalar <-> scalar,
+ /// and general <-> the upper double element of a 128-bit vector.
+ pub fn fmov(d: Register, form: union(enum) { immediate: f16, register: Register }) Instruction {
+ switch (form) {
+ .immediate => |immediate| {
+ // Pack the f16 into the 8-bit FMOV immediate: 4-bit mantissa,
+ // 3-bit biased exponent, sign. @shrExact traps if the value is
+ // not exactly representable (low 6 mantissa bits nonzero).
+ const repr: std.math.FloatRepr(f16) = @bitCast(immediate);
+ const imm: u8 = @bitCast(@as(packed struct(u8) {
+ mantissa: u4,
+ exponent: i3,
+ sign: std.math.Sign,
+ }, .{
+ .mantissa = @intCast(@shrExact(repr.mantissa, 6)),
+ .exponent = @intCast(repr.exponent.unbias() - 1),
+ .sign = repr.sign,
+ }));
+ switch (d.format) {
+ else => unreachable,
+ .scalar => |ftype| return .{ .data_processing_vector = .{ .float_immediate = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .imm8 = imm,
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } },
+ .vector => |arrangement| {
+ // Byte arrangements and single-element vectors are invalid here.
+ assert(arrangement.len() > 1 and arrangement.elemSize() != .byte);
+ return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ // The 8-bit immediate is split across imm5 (low) and imm3 (high).
+ .imm5 = @truncate(imm >> 0),
+ .imm3 = @intCast(imm >> 5),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ },
+ .register => |n| switch (d.format) {
+ else => unreachable,
+ .integer => |sf| switch (n.format) {
+ else => unreachable,
+ .scalar => |ftype| {
+ // Single requires a W destination, double an X destination;
+ // half pairs with either width.
+ switch (ftype) {
+ else => unreachable,
+ .half => {},
+ .single => assert(sf == .word),
+ .double => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .opcode = .float_to_integer,
+ .rmode = .@"0",
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ // Only the upper double element (index 1) of a 128-bit vector
+ // can be moved to a general register (rmode = 1, ftype = quad).
+ .element => |element| return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .opcode = .float_to_integer,
+ .rmode = switch (element.index) {
+ else => unreachable,
+ 1 => .@"1",
+ },
+ .ftype = switch (element.size) {
+ else => unreachable,
+ .double => .quad,
+ },
+ .sf = sf,
+ },
+ } } },
+ },
+ .scalar => |ftype| switch (n.format) {
+ else => unreachable,
+ .integer => {
+ const sf = n.format.integer;
+ switch (ftype) {
+ else => unreachable,
+ .half => {},
+ .single => assert(sf == .word),
+ .double => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ .opcode = .integer_to_float,
+ .rmode = .@"0",
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .scalar => {
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ },
+ },
+ // General register into the upper double element of a 128-bit vector.
+ .element => |element| switch (n.format) {
+ else => unreachable,
+ .integer => |sf| return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ .opcode = .integer_to_float,
+ .rmode = switch (element.index) {
+ else => unreachable,
+ 1 => .@"1",
+ },
+ .ftype = switch (element.size) {
+ else => unreachable,
+ .double => .quad,
+ },
+ .sf = sf,
+ },
+ } } },
+ },
+ },
+ }
+ }
+ /// C7.2.133 FMSUB
+ /// Fused multiply-subtract: d = a - n * m; all four registers must share
+ /// one scalar precision (asserted).
+ pub fn fmsub(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fmsub = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.136 FMUL (scalar)
+ /// Floating-point multiply: d = n * m.
+ pub fn fmul(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmul = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.140 FNEG (scalar)
+ /// Floating-point negate: d = -n.
+ pub fn fneg(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fneg = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.141 FNMADD
+ /// Fused negated multiply-add: d = -a - n * m.
+ pub fn fnmadd(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fnmadd = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.142 FNMSUB
+ /// Fused negated multiply-subtract: d = -a + n * m.
+ pub fn fnmsub(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fnmsub = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.143 FNMUL (scalar)
+ /// Floating-point multiply-negate: d = -(n * m).
+ pub fn fnmul(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fnmul = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.156 FRINTA (scalar)
+ pub fn frinta(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frinta = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.158 FRINTI (scalar)
+ pub fn frinti(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frinti = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.160 FRINTM (scalar)
+ pub fn frintm(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frintm = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.162 FRINTN (scalar)
+ pub fn frintn(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frintn = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.164 FRINTP (scalar)
+ ///
+ /// Floating-point round to integral, toward plus infinity.
+ /// Both operands must be scalar FP registers of the same format.
+ pub fn frintp(d: Register, n: Register) Instruction {
+     const fmt = d.format.scalar;
+     assert(n.format.scalar == fmt);
+     return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+         .frintp = .{
+             .Rd = d.alias.encode(.{ .V = true }),
+             .Rn = n.alias.encode(.{ .V = true }),
+             .ftype = switch (fmt) {
+                 .half => .half,
+                 .single => .single,
+                 .double => .double,
+                 else => unreachable,
+             },
+         },
+     } } };
+ }
+ /// C7.2.166 FRINTX (scalar)
+ ///
+ /// Floating-point round to integral exact, using the current rounding
+ /// mode. Both operands must be scalar FP registers of the same format.
+ pub fn frintx(d: Register, n: Register) Instruction {
+     const fmt = d.format.scalar;
+     assert(n.format.scalar == fmt);
+     return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+         .frintx = .{
+             .Rd = d.alias.encode(.{ .V = true }),
+             .Rn = n.alias.encode(.{ .V = true }),
+             .ftype = switch (fmt) {
+                 .half => .half,
+                 .single => .single,
+                 .double => .double,
+                 else => unreachable,
+             },
+         },
+     } } };
+ }
+ /// C7.2.168 FRINTZ (scalar)
+ ///
+ /// Floating-point round to integral, toward zero.
+ /// Both operands must be scalar FP registers of the same format.
+ pub fn frintz(d: Register, n: Register) Instruction {
+     const fmt = d.format.scalar;
+     assert(n.format.scalar == fmt);
+     return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+         .frintz = .{
+             .Rd = d.alias.encode(.{ .V = true }),
+             .Rn = n.alias.encode(.{ .V = true }),
+             .ftype = switch (fmt) {
+                 .half => .half,
+                 .single => .single,
+                 .double => .double,
+                 else => unreachable,
+             },
+         },
+     } } };
+ }
+ /// C7.2.172 FSQRT (scalar)
+ ///
+ /// Floating-point square root: d = sqrt(n).
+ /// Both operands must be scalar FP registers of the same format.
+ pub fn fsqrt(d: Register, n: Register) Instruction {
+     const fmt = d.format.scalar;
+     assert(n.format.scalar == fmt);
+     return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+         .fsqrt = .{
+             .Rd = d.alias.encode(.{ .V = true }),
+             .Rn = n.alias.encode(.{ .V = true }),
+             .ftype = switch (fmt) {
+                 .half => .half,
+                 .single => .single,
+                 .double => .double,
+                 else => unreachable,
+             },
+         },
+     } } };
+ }
+ /// C7.2.174 FSUB (scalar)
+ ///
+ /// Floating-point subtract: d = n - m.
+ /// All three operands must be scalar FP registers of the same format.
+ pub fn fsub(d: Register, n: Register, m: Register) Instruction {
+     const fmt = d.format.scalar;
+     assert(n.format.scalar == fmt and m.format.scalar == fmt);
+     return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+         .fsub = .{
+             .Rd = d.alias.encode(.{ .V = true }),
+             .Rn = n.alias.encode(.{ .V = true }),
+             .Rm = m.alias.encode(.{ .V = true }),
+             .ftype = switch (fmt) {
+                 .half => .half,
+                 .single => .single,
+                 .double => .double,
+                 else => unreachable,
+             },
+         },
+     } } };
+ }
+ /// C6.2.126 HINT
+ ///
+ /// Generic hint instruction. The 7-bit hint number is split across the
+ /// encoding's CRm (high 4 bits) and op2 (low 3 bits) fields.
+ pub fn hint(imm: u7) Instruction {
+     return .{ .branch_exception_generating_system = .{ .hints = .{
+         .group = .{
+             .CRm = @truncate(imm >> 3),
+             .op2 = @truncate(imm),
+         },
+     } } };
+ }
+ /// C6.2.127 HLT
+ ///
+ /// Halt instruction (debug halt) carrying a 16-bit immediate payload.
+ pub fn hlt(imm: u16) Instruction {
+     return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+         .hlt = .{ .imm16 = imm },
+     } } };
+ }
+ /// C6.2.128 HVC
+ ///
+ /// Hypervisor call carrying a 16-bit immediate payload.
+ pub fn hvc(imm: u16) Instruction {
+     return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+         .hvc = .{ .imm16 = imm },
+     } } };
+ }
+ /// C6.2.131 ISB
+ ///
+ /// Instruction synchronization barrier; the barrier option is placed
+ /// directly in the CRm field.
+ pub fn isb(option: BranchExceptionGeneratingSystem.Barriers.Option) Instruction {
+     return .{ .branch_exception_generating_system = .{ .barriers = .{
+         .isb = .{
+             .CRm = option,
+         },
+     } } };
+ }
+ /// C6.2.164 LDP
+ /// C7.2.190 LDP (SIMD&FP)
+ ///
+ /// Load a pair of registers from memory. `t1` and `t2` must both be
+ /// integer registers of the same width, or both scalar FP registers of
+ /// the same size; the addressing `form` selects post-indexed,
+ /// pre-indexed, or signed-offset encoding. A bare `base` is sugar for a
+ /// zero signed offset. The imm7 field holds the byte offset scaled down
+ /// by the register size, so the offset must be a multiple of that size
+ /// (@shrExact asserts this).
+ pub fn ldp(t1: Register, t2: Register, form: union(enum) {
+     post_index: struct { base: Register, index: i10 },
+     pre_index: struct { base: Register, index: i10 },
+     signed_offset: struct { base: Register, offset: i10 = 0 },
+     base: Register,
+ }) Instruction {
+     switch (t1.format) {
+         else => unreachable,
+         .integer => |sf| {
+             // Integer pair: both targets must share the same width.
+             assert(t2.format.integer == sf);
+             form: switch (form) {
+                 .post_index => |post_index| {
+                     assert(post_index.base.format.integer == .doubleword);
+                     return .{ .load_store = .{ .register_pair_post_indexed = .{ .integer = .{
+                         .ldp = .{
+                             .Rt = t1.alias.encode(.{}),
+                             .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                             .Rt2 = t2.alias.encode(.{}),
+                             // Scale by 4 (word) or 8 (doubleword) bytes.
+                             .imm7 = @intCast(@shrExact(post_index.index, @as(u2, 2) + @intFromEnum(sf))),
+                             .sf = sf,
+                         },
+                     } } } };
+                 },
+                 .pre_index => |pre_index| {
+                     assert(pre_index.base.format.integer == .doubleword);
+                     return .{ .load_store = .{ .register_pair_pre_indexed = .{ .integer = .{
+                         .ldp = .{
+                             .Rt = t1.alias.encode(.{}),
+                             .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                             .Rt2 = t2.alias.encode(.{}),
+                             .imm7 = @intCast(@shrExact(pre_index.index, @as(u2, 2) + @intFromEnum(sf))),
+                             .sf = sf,
+                         },
+                     } } } };
+                 },
+                 .signed_offset => |signed_offset| {
+                     assert(signed_offset.base.format.integer == .doubleword);
+                     return .{ .load_store = .{ .register_pair_offset = .{ .integer = .{
+                         .ldp = .{
+                             .Rt = t1.alias.encode(.{}),
+                             .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+                             .Rt2 = t2.alias.encode(.{}),
+                             .imm7 = @intCast(@shrExact(signed_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+                             .sf = sf,
+                         },
+                     } } } };
+                 },
+                 // Bare base register == signed offset of zero.
+                 .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+             }
+         },
+         .scalar => |vs| {
+             // SIMD&FP pair: both targets must share the same scalar size.
+             assert(t2.format.scalar == vs);
+             form: switch (form) {
+                 .post_index => |post_index| {
+                     assert(post_index.base.format.integer == .doubleword);
+                     return .{ .load_store = .{ .register_pair_post_indexed = .{ .vector = .{
+                         .ldp = .{
+                             .Rt = t1.alias.encode(.{ .V = true }),
+                             .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                             .Rt2 = t2.alias.encode(.{ .V = true }),
+                             // Scale by the scalar size (log2 given by the enum tag).
+                             .imm7 = @intCast(@shrExact(post_index.index, @intFromEnum(vs))),
+                             .opc = .encode(vs),
+                         },
+                     } } } };
+                 },
+                 .signed_offset => |signed_offset| {
+                     assert(signed_offset.base.format.integer == .doubleword);
+                     return .{ .load_store = .{ .register_pair_offset = .{ .vector = .{
+                         .ldp = .{
+                             .Rt = t1.alias.encode(.{ .V = true }),
+                             .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+                             .Rt2 = t2.alias.encode(.{ .V = true }),
+                             .imm7 = @intCast(@shrExact(signed_offset.offset, @intFromEnum(vs))),
+                             .opc = .encode(vs),
+                         },
+                     } } } };
+                 },
+                 .pre_index => |pre_index| {
+                     assert(pre_index.base.format.integer == .doubleword);
+                     return .{ .load_store = .{ .register_pair_pre_indexed = .{ .vector = .{
+                         .ldp = .{
+                             .Rt = t1.alias.encode(.{ .V = true }),
+                             .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                             .Rt2 = t2.alias.encode(.{ .V = true }),
+                             .imm7 = @intCast(@shrExact(pre_index.index, @intFromEnum(vs))),
+                             .opc = .encode(vs),
+                         },
+                     } } } };
+                 },
+                 .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+             }
+         },
+     }
+ }
+ /// C6.2.166 LDR (immediate)
+ /// C6.2.167 LDR (literal)
+ /// C6.2.168 LDR (register)
+ /// C7.2.191 LDR (immediate, SIMD&FP)
+ /// C7.2.192 LDR (literal, SIMD&FP)
+ /// C7.2.193 LDR (register, SIMD&FP)
+ ///
+ /// Load register `t` (integer or scalar FP) using one of the LDR
+ /// addressing forms: post/pre-indexed with a 9-bit signed byte offset,
+ /// scaled unsigned offset, PC-relative literal, or register offset with
+ /// an extend/shift. A bare `base` is sugar for an unsigned offset of
+ /// zero, and `extended_register` is sugar for the explicit form with
+ /// the amount extracted from the extend. Scaled immediates must be a
+ /// multiple of the access size (@shrExact asserts this).
+ pub fn ldr(t: Register, form: union(enum) {
+     post_index: struct { base: Register, index: i9 },
+     pre_index: struct { base: Register, index: i9 },
+     unsigned_offset: struct { base: Register, offset: u16 = 0 },
+     base: Register,
+     literal: i21,
+     extended_register_explicit: struct {
+         base: Register,
+         index: Register,
+         option: LoadStore.RegisterRegisterOffset.Option,
+         amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+     },
+     extended_register: struct {
+         base: Register,
+         index: Register,
+         extend: LoadStore.RegisterRegisterOffset.Extend,
+     },
+ }) Instruction {
+     switch (t.format) {
+         else => unreachable,
+         .integer => |sf| form: switch (form) {
+             .post_index => |post_index| {
+                 assert(post_index.base.format.integer == .doubleword);
+                 return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{}),
+                         .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                         .imm9 = post_index.index,
+                         .sf = sf,
+                     },
+                 } } } };
+             },
+             .pre_index => |pre_index| {
+                 assert(pre_index.base.format.integer == .doubleword);
+                 return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{}),
+                         .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                         .imm9 = pre_index.index,
+                         .sf = sf,
+                     },
+                 } } } };
+             },
+             .unsigned_offset => |unsigned_offset| {
+                 assert(unsigned_offset.base.format.integer == .doubleword);
+                 return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{}),
+                         .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+                         // Scale by 4 (word) or 8 (doubleword) bytes.
+                         .imm12 = @intCast(@shrExact(unsigned_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+                         .sf = sf,
+                     },
+                 } } } };
+             },
+             .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+             // Literal offsets are word-aligned; imm19 holds offset / 4.
+             .literal => |offset| return .{ .load_store = .{ .register_literal = .{ .integer = .{
+                 .ldr = .{
+                     .Rt = t.alias.encode(.{}),
+                     .imm19 = @intCast(@shrExact(offset, 2)),
+                     .sf = sf,
+                 },
+             } } } },
+             .extended_register_explicit => |extended_register_explicit| {
+                 assert(extended_register_explicit.base.format.integer == .doubleword and
+                     extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+                 return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{}),
+                         .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+                         // S selects a shift by the access size; only 0 or
+                         // log2(access size) is a valid amount.
+                         .S = switch (sf) {
+                             .word => switch (extended_register_explicit.amount) {
+                                 0 => false,
+                                 2 => true,
+                                 else => unreachable,
+                             },
+                             .doubleword => switch (extended_register_explicit.amount) {
+                                 0 => false,
+                                 3 => true,
+                                 else => unreachable,
+                             },
+                         },
+                         .option = extended_register_explicit.option,
+                         .Rm = extended_register_explicit.index.alias.encode(.{}),
+                         .sf = sf,
+                     },
+                 } } } };
+             },
+             .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+                 .base = extended_register.base,
+                 .index = extended_register.index,
+                 .option = extended_register.extend,
+                 // Every extend variant carries an amount payload.
+                 .amount = switch (extended_register.extend) {
+                     .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+                 },
+             } },
+         },
+         .scalar => |vs| form: switch (form) {
+             .post_index => |post_index| {
+                 assert(post_index.base.format.integer == .doubleword);
+                 return .{ .load_store = .{ .register_immediate_post_indexed = .{ .vector = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{ .V = true }),
+                         .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                         .imm9 = post_index.index,
+                         .opc1 = .encode(vs),
+                         .size = .encode(vs),
+                     },
+                 } } } };
+             },
+             .pre_index => |pre_index| {
+                 assert(pre_index.base.format.integer == .doubleword);
+                 return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .vector = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{ .V = true }),
+                         .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                         .imm9 = pre_index.index,
+                         .opc1 = .encode(vs),
+                         .size = .encode(vs),
+                     },
+                 } } } };
+             },
+             .unsigned_offset => |unsigned_offset| {
+                 assert(unsigned_offset.base.format.integer == .doubleword);
+                 return .{ .load_store = .{ .register_unsigned_immediate = .{ .vector = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{ .V = true }),
+                         .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+                         // Scale by the scalar size (log2 given by the enum tag).
+                         .imm12 = @intCast(@shrExact(unsigned_offset.offset, @intFromEnum(vs))),
+                         .opc1 = .encode(vs),
+                         .size = .encode(vs),
+                     },
+                 } } } };
+             },
+             .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+             .literal => |offset| return .{ .load_store = .{ .register_literal = .{ .vector = .{
+                 .ldr = .{
+                     .Rt = t.alias.encode(.{ .V = true }),
+                     .imm19 = @intCast(@shrExact(offset, 2)),
+                     .opc = .encode(vs),
+                 },
+             } } } },
+             .extended_register_explicit => |extended_register_explicit| {
+                 assert(extended_register_explicit.base.format.integer == .doubleword and
+                     extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+                 return .{ .load_store = .{ .register_register_offset = .{ .vector = .{
+                     .ldr = .{
+                         .Rt = t.alias.encode(.{ .V = true }),
+                         .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+                         // Valid shift amount is 0 or log2(access size),
+                         // per element size byte..quad.
+                         .S = switch (vs) {
+                             else => unreachable,
+                             .byte => switch (extended_register_explicit.amount) {
+                                 0 => false,
+                                 else => unreachable,
+                             },
+                             .half => switch (extended_register_explicit.amount) {
+                                 0 => false,
+                                 1 => true,
+                                 else => unreachable,
+                             },
+                             .single => switch (extended_register_explicit.amount) {
+                                 0 => false,
+                                 2 => true,
+                                 else => unreachable,
+                             },
+                             .double => switch (extended_register_explicit.amount) {
+                                 0 => false,
+                                 3 => true,
+                                 else => unreachable,
+                             },
+                             .quad => switch (extended_register_explicit.amount) {
+                                 0 => false,
+                                 4 => true,
+                                 else => unreachable,
+                             },
+                         },
+                         .option = extended_register_explicit.option,
+                         .Rm = extended_register_explicit.index.alias.encode(.{}),
+                         .opc1 = .encode(vs),
+                         .size = .encode(vs),
+                     },
+                 } } } };
+             },
+             .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+                 .base = extended_register.base,
+                 .index = extended_register.index,
+                 .option = extended_register.extend,
+                 .amount = switch (extended_register.extend) {
+                     .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+                 },
+             } },
+         },
+     }
+ }
+ /// C6.2.170 LDRB (immediate)
+ /// C6.2.171 LDRB (register)
+ ///
+ /// Load a byte into word register `t` (zero-extended). Supports
+ /// post/pre-indexed, unsigned-offset, and register-offset addressing;
+ /// a bare `base` is sugar for an unsigned offset of zero. Byte
+ /// accesses are unscaled, so imm12 takes the offset directly.
+ pub fn ldrb(t: Register, form: union(enum) {
+     post_index: struct { base: Register, index: i9 },
+     pre_index: struct { base: Register, index: i9 },
+     unsigned_offset: struct { base: Register, offset: u12 = 0 },
+     base: Register,
+     extended_register_explicit: struct {
+         base: Register,
+         index: Register,
+         option: LoadStore.RegisterRegisterOffset.Option,
+         amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+     },
+     extended_register: struct {
+         base: Register,
+         index: Register,
+         extend: LoadStore.RegisterRegisterOffset.Extend,
+     },
+ }) Instruction {
+     assert(t.format.integer == .word);
+     form: switch (form) {
+         .post_index => |post_index| {
+             assert(post_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+                 .ldrb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = post_index.index,
+                 },
+             } } } };
+         },
+         .pre_index => |pre_index| {
+             assert(pre_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+                 .ldrb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = pre_index.index,
+                 },
+             } } } };
+         },
+         .unsigned_offset => |unsigned_offset| {
+             assert(unsigned_offset.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+                 .ldrb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+                     .imm12 = unsigned_offset.offset,
+                 },
+             } } } };
+         },
+         .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+         .extended_register_explicit => |extended_register_explicit| {
+             assert(extended_register_explicit.base.format.integer == .doubleword and
+                 extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+             return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+                 .ldrb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+                     // Byte access: only amount 0 is encodable.
+                     .S = switch (extended_register_explicit.amount) {
+                         0 => false,
+                         else => unreachable,
+                     },
+                     .option = extended_register_explicit.option,
+                     .Rm = extended_register_explicit.index.alias.encode(.{}),
+                 },
+             } } } };
+         },
+         .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+             .base = extended_register.base,
+             .index = extended_register.index,
+             .option = extended_register.extend,
+             .amount = switch (extended_register.extend) {
+                 .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+             },
+         } },
+     }
+ }
+ /// C6.2.172 LDRH (immediate)
+ /// C6.2.173 LDRH (register)
+ ///
+ /// Load a halfword into word register `t` (zero-extended). Same
+ /// addressing forms as `ldrb`; the unsigned offset is scaled by 2
+ /// (must be even, enforced by @shrExact).
+ pub fn ldrh(t: Register, form: union(enum) {
+     post_index: struct { base: Register, index: i9 },
+     pre_index: struct { base: Register, index: i9 },
+     unsigned_offset: struct { base: Register, offset: u13 = 0 },
+     base: Register,
+     extended_register_explicit: struct {
+         base: Register,
+         index: Register,
+         option: LoadStore.RegisterRegisterOffset.Option,
+         amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+     },
+     extended_register: struct {
+         base: Register,
+         index: Register,
+         extend: LoadStore.RegisterRegisterOffset.Extend,
+     },
+ }) Instruction {
+     assert(t.format.integer == .word);
+     form: switch (form) {
+         .post_index => |post_index| {
+             assert(post_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+                 .ldrh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = post_index.index,
+                 },
+             } } } };
+         },
+         .pre_index => |pre_index| {
+             assert(pre_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+                 .ldrh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = pre_index.index,
+                 },
+             } } } };
+         },
+         .unsigned_offset => |unsigned_offset| {
+             assert(unsigned_offset.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+                 .ldrh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+                     // Halfword scaling: imm12 = offset / 2.
+                     .imm12 = @intCast(@shrExact(unsigned_offset.offset, 1)),
+                 },
+             } } } };
+         },
+         .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+         .extended_register_explicit => |extended_register_explicit| {
+             assert(extended_register_explicit.base.format.integer == .doubleword and
+                 extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+             return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+                 .ldrh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+                     // Halfword access: amount 0 or 1 selects the shift.
+                     .S = switch (extended_register_explicit.amount) {
+                         0 => false,
+                         1 => true,
+                         else => unreachable,
+                     },
+                     .option = extended_register_explicit.option,
+                     .Rm = extended_register_explicit.index.alias.encode(.{}),
+                 },
+             } } } };
+         },
+         .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+             .base = extended_register.base,
+             .index = extended_register.index,
+             .option = extended_register.extend,
+             .amount = switch (extended_register.extend) {
+                 .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+             },
+         } },
+     }
+ }
+ /// C6.2.174 LDRSB (immediate)
+ /// C6.2.175 LDRSB (register)
+ ///
+ /// Load a byte and sign-extend it into `t`, which may be a word or
+ /// doubleword register. opc<0> is the complement of the target width
+ /// bit (set for word targets, clear for doubleword targets).
+ pub fn ldrsb(t: Register, form: union(enum) {
+     post_index: struct { base: Register, index: i9 },
+     pre_index: struct { base: Register, index: i9 },
+     unsigned_offset: struct { base: Register, offset: u12 = 0 },
+     base: Register,
+     extended_register_explicit: struct {
+         base: Register,
+         index: Register,
+         option: LoadStore.RegisterRegisterOffset.Option,
+         amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+     },
+     extended_register: struct {
+         base: Register,
+         index: Register,
+         extend: LoadStore.RegisterRegisterOffset.Extend,
+     },
+ }) Instruction {
+     const sf = t.format.integer;
+     form: switch (form) {
+         .post_index => |post_index| {
+             assert(post_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+                 .ldrsb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = post_index.index,
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .pre_index => |pre_index| {
+             assert(pre_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+                 .ldrsb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = pre_index.index,
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .unsigned_offset => |unsigned_offset| {
+             assert(unsigned_offset.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+                 .ldrsb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+                     .imm12 = unsigned_offset.offset,
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+         .extended_register_explicit => |extended_register_explicit| {
+             assert(extended_register_explicit.base.format.integer == .doubleword and
+                 extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+             return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+                 .ldrsb = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+                     // Byte access: only amount 0 is encodable.
+                     .S = switch (extended_register_explicit.amount) {
+                         0 => false,
+                         else => unreachable,
+                     },
+                     .option = extended_register_explicit.option,
+                     .Rm = extended_register_explicit.index.alias.encode(.{}),
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+             .base = extended_register.base,
+             .index = extended_register.index,
+             .option = extended_register.extend,
+             .amount = switch (extended_register.extend) {
+                 .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+             },
+         } },
+     }
+ }
+ /// C6.2.176 LDRSH (immediate)
+ /// C6.2.177 LDRSH (register)
+ ///
+ /// Load a halfword and sign-extend it into `t` (word or doubleword
+ /// target, selected via opc<0> = ~sf). The unsigned offset is scaled
+ /// by 2 (must be even, enforced by @shrExact).
+ pub fn ldrsh(t: Register, form: union(enum) {
+     post_index: struct { base: Register, index: i9 },
+     pre_index: struct { base: Register, index: i9 },
+     unsigned_offset: struct { base: Register, offset: u13 = 0 },
+     base: Register,
+     extended_register_explicit: struct {
+         base: Register,
+         index: Register,
+         option: LoadStore.RegisterRegisterOffset.Option,
+         amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+     },
+     extended_register: struct {
+         base: Register,
+         index: Register,
+         extend: LoadStore.RegisterRegisterOffset.Extend,
+     },
+ }) Instruction {
+     const sf = t.format.integer;
+     form: switch (form) {
+         .post_index => |post_index| {
+             assert(post_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+                 .ldrsh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = post_index.index,
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .pre_index => |pre_index| {
+             assert(pre_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+                 .ldrsh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = pre_index.index,
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .unsigned_offset => |unsigned_offset| {
+             assert(unsigned_offset.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+                 .ldrsh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+                     // Halfword scaling: imm12 = offset / 2.
+                     .imm12 = @intCast(@shrExact(unsigned_offset.offset, 1)),
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+         .extended_register_explicit => |extended_register_explicit| {
+             assert(extended_register_explicit.base.format.integer == .doubleword and
+                 extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+             return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+                 .ldrsh = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+                     // Halfword access: amount 0 or 1 selects the shift.
+                     .S = switch (extended_register_explicit.amount) {
+                         0 => false,
+                         1 => true,
+                         else => unreachable,
+                     },
+                     .option = extended_register_explicit.option,
+                     .Rm = extended_register_explicit.index.alias.encode(.{}),
+                     .opc0 = ~@intFromEnum(sf),
+                 },
+             } } } };
+         },
+         .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+             .base = extended_register.base,
+             .index = extended_register.index,
+             .option = extended_register.extend,
+             .amount = switch (extended_register.extend) {
+                 .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+             },
+         } },
+     }
+ }
+ /// C6.2.178 LDRSW (immediate)
+ /// C6.2.179 LDRSW (literal)
+ /// C6.2.180 LDRSW (register)
+ ///
+ /// Load a word and sign-extend it into doubleword register `t`.
+ /// Word-sized offsets are scaled by 4 (enforced by @shrExact).
+ ///
+ /// NOTE(review): unlike the sibling ldr/ldrb/ldrh encoders, the
+ /// post_index, unsigned_offset, and literal arms initialize the payload
+ /// without an `.integer` wrapper while pre_index and
+ /// extended_register_explicit use one, and `S` is written as 0b0/0b1
+ /// here instead of false/true — confirm the payload types are really
+ /// asymmetric and that this is intentional.
+ pub fn ldrsw(t: Register, form: union(enum) {
+     post_index: struct { base: Register, index: i9 },
+     pre_index: struct { base: Register, index: i9 },
+     unsigned_offset: struct { base: Register, offset: u14 = 0 },
+     base: Register,
+     literal: i21,
+     extended_register_explicit: struct {
+         base: Register,
+         index: Register,
+         option: LoadStore.RegisterRegisterOffset.Integer.Option,
+         amount: LoadStore.RegisterRegisterOffset.Integer.Extend.Amount,
+     },
+     extended_register: struct {
+         base: Register,
+         index: Register,
+         extend: LoadStore.RegisterRegisterOffset.Integer.Extend,
+     },
+ }) Instruction {
+     assert(t.format.integer == .doubleword);
+     form: switch (form) {
+         .post_index => |post_index| {
+             assert(post_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_post_indexed = .{
+                 .ldrsw = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = post_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = post_index.index,
+                 },
+             } } };
+         },
+         .pre_index => |pre_index| {
+             assert(pre_index.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+                 .ldrsw = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+                     .imm9 = pre_index.index,
+                 },
+             } } } };
+         },
+         .unsigned_offset => |unsigned_offset| {
+             assert(unsigned_offset.base.format.integer == .doubleword);
+             return .{ .load_store = .{ .register_unsigned_immediate = .{
+                 .ldrsw = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+                     // Word scaling: imm12 = offset / 4.
+                     .imm12 = @intCast(@shrExact(unsigned_offset.offset, 2)),
+                 },
+             } } };
+         },
+         .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+         .literal => |offset| return .{ .load_store = .{ .register_literal = .{
+             .ldrsw = .{
+                 .Rt = t.alias.encode(.{}),
+                 .imm19 = @intCast(@shrExact(offset, 2)),
+             },
+         } } },
+         .extended_register_explicit => |extended_register_explicit| {
+             assert(extended_register_explicit.base.format.integer == .doubleword and
+                 extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+             return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+                 .ldrsw = .{
+                     .Rt = t.alias.encode(.{}),
+                     .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+                     // Word access: amount 0 or 2 selects the shift.
+                     .S = switch (extended_register_explicit.amount) {
+                         0 => 0b0,
+                         2 => 0b1,
+                         else => unreachable,
+                     },
+                     .option = extended_register_explicit.option,
+                     .Rm = extended_register_explicit.index.alias.encode(.{}),
+                 },
+             } } } };
+         },
+         .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+             .base = extended_register.base,
+             .index = extended_register.index,
+             .option = extended_register.extend,
+             .amount = switch (extended_register.extend) {
+                 .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+             },
+         } },
+     }
+ }
+ /// C6.2.202 LDUR
+ /// C7.2.194 LDUR (SIMD&FP)
+ ///
+ /// Load register (unscaled immediate): loads `t` (integer or scalar
+ /// FP) from `[n + simm]` where `simm` is a raw 9-bit signed byte
+ /// offset, not scaled by the access size.
+ pub fn ldur(t: Register, n: Register, simm: i9) Instruction {
+     assert(n.format.integer == .doubleword);
+     switch (t.format) {
+         else => unreachable,
+         .integer => |sf| return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+             .ldur = .{
+                 .Rt = t.alias.encode(.{}),
+                 .Rn = n.alias.encode(.{ .sp = true }),
+                 .imm9 = simm,
+                 .sf = sf,
+             },
+         } } } },
+         .scalar => |vs| return .{ .load_store = .{ .register_unscaled_immediate = .{ .vector = .{
+             .ldur = .{
+                 .Rt = t.alias.encode(.{ .V = true }),
+                 .Rn = n.alias.encode(.{ .sp = true }),
+                 .imm9 = simm,
+                 .opc1 = .encode(vs),
+                 .size = .encode(vs),
+             },
+         } } } },
+     }
+ }
+ /// C6.2.203 LDURB
+ ///
+ /// Load a byte from `[n + simm]` (unscaled offset) into word register `t`.
+ pub fn ldurb(t: Register, n: Register, simm: i9) Instruction {
+     assert(t.format.integer == .word and n.format.integer == .doubleword);
+     return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+         .ldurb = .{
+             .Rt = t.alias.encode(.{}),
+             .Rn = n.alias.encode(.{ .sp = true }),
+             .imm9 = simm,
+         },
+     } } } };
+ }
+ /// C6.2.204 LDURH
+ ///
+ /// Load a halfword from `[n + simm]` (unscaled offset) into word register `t`.
+ pub fn ldurh(t: Register, n: Register, simm: i9) Instruction {
+     assert(t.format.integer == .word and n.format.integer == .doubleword);
+     return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+         .ldurh = .{
+             .Rt = t.alias.encode(.{}),
+             .Rn = n.alias.encode(.{ .sp = true }),
+             .imm9 = simm,
+         },
+     } } } };
+ }
+ /// C6.2.205 LDURSB
+ ///
+ /// Load a byte from `[n + simm]` (unscaled offset) and sign-extend it
+ /// into `t`. opc<0> is the complement of the target width bit (set for
+ /// word targets, clear for doubleword targets).
+ pub fn ldursb(t: Register, n: Register, simm: i9) Instruction {
+     assert(n.format.integer == .doubleword);
+     return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+         .ldursb = .{
+             .Rt = t.alias.encode(.{}),
+             .Rn = n.alias.encode(.{ .sp = true }),
+             .imm9 = simm,
+             .opc0 = ~@intFromEnum(t.format.integer),
+         },
+     } } } };
+ }
+ /// C6.2.206 LDURSH
+ ///
+ /// Load a halfword from `[n + simm]` (unscaled offset) and sign-extend
+ /// it into `t`. opc<0> is the complement of the target width bit.
+ pub fn ldursh(t: Register, n: Register, simm: i9) Instruction {
+     assert(n.format.integer == .doubleword);
+     return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+         .ldursh = .{
+             .Rt = t.alias.encode(.{}),
+             .Rn = n.alias.encode(.{ .sp = true }),
+             .imm9 = simm,
+             .opc0 = ~@intFromEnum(t.format.integer),
+         },
+     } } } };
+ }
+ /// C6.2.207 LDURSW
+ ///
+ /// Load a word from `[n + simm]` (unscaled offset) and sign-extend it
+ /// into doubleword register `t`.
+ pub fn ldursw(t: Register, n: Register, simm: i9) Instruction {
+     assert(t.format.integer == .doubleword and n.format.integer == .doubleword);
+     return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+         .ldursw = .{
+             .Rt = t.alias.encode(.{}),
+             .Rn = n.alias.encode(.{ .sp = true }),
+             .imm9 = simm,
+         },
+     } } } };
+ }
+ /// C6.2.214 LSLV
+ ///
+ /// Logical shift left, variable: shifts `n` left by the amount in `m`.
+ /// All three registers must share the same integer width.
+ pub fn lslv(d: Register, n: Register, m: Register) Instruction {
+     const width = d.format.integer;
+     assert(n.format.integer == width and m.format.integer == width);
+     return .{ .data_processing_register = .{ .data_processing_two_source = .{
+         .lslv = .{
+             .Rd = d.alias.encode(.{}),
+             .Rn = n.alias.encode(.{}),
+             .Rm = m.alias.encode(.{}),
+             .sf = width,
+         },
+     } } };
+ }
+ /// C6.2.217 LSRV
+ ///
+ /// Logical shift right, variable: shifts `n` right by the amount in `m`.
+ /// All three registers must share the same integer width.
+ pub fn lsrv(d: Register, n: Register, m: Register) Instruction {
+     const width = d.format.integer;
+     assert(n.format.integer == width and m.format.integer == width);
+     return .{ .data_processing_register = .{ .data_processing_two_source = .{
+         .lsrv = .{
+             .Rd = d.alias.encode(.{}),
+             .Rn = n.alias.encode(.{}),
+             .Rm = m.alias.encode(.{}),
+             .sf = width,
+         },
+     } } };
+ }
+ /// C6.2.218 MADD
+ ///
+ /// Multiply-add: d = a + n * m. All four registers must share the same
+ /// integer width.
+ pub fn madd(d: Register, n: Register, m: Register, a: Register) Instruction {
+     const width = d.format.integer;
+     assert(n.format.integer == width and m.format.integer == width and a.format.integer == width);
+     return .{ .data_processing_register = .{ .data_processing_three_source = .{
+         .madd = .{
+             .Rd = d.alias.encode(.{}),
+             .Rn = n.alias.encode(.{}),
+             .Ra = a.alias.encode(.{}),
+             .Rm = m.alias.encode(.{}),
+             .sf = width,
+         },
+     } } };
+ }
+ /// C7.2.204 MOVI
+ ///
+ /// Vector move immediate. `d` is a vector register (or a double scalar,
+ /// which encodes as the 1d arrangement), `imm8` is the 8-bit immediate,
+ /// and `shift` selects the variant: LSL by 0/8/16/24, MSL (shift-ones)
+ /// by 8/16, or per-byte replication for 64-bit elements. Shift amounts
+ /// must be multiples of 8 (enforced by @shrExact); the narrowed casts
+ /// reject amounts that the element size cannot encode.
+ pub fn movi(d: Register, imm8: u8, shift: union(enum) { lsl: u5, msl: u5, replicate }) Instruction {
+     const arrangement = switch (d.format) {
+         else => unreachable,
+         // A double scalar is encoded as the 1d arrangement.
+         .scalar => |vs| switch (vs) {
+             else => unreachable,
+             .double => .@"1d",
+         },
+         .vector => |arrangement| switch (arrangement) {
+             .@"1d" => unreachable,
+             else => arrangement,
+         },
+     };
+     return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+         .movi = .{
+             .Rd = d.alias.encode(.{ .V = true }),
+             // imm8 is split across the encoding: low 5 bits -> imm5,
+             // high 3 bits -> imm3.
+             .imm5 = @truncate(imm8 >> 0),
+             // cmode selects element size and shift; the shifted-in bits
+             // carry amount/8 (the u0/u1/u2 casts bound the legal range
+             // per element size).
+             .cmode = switch (shift) {
+                 .lsl => |amount| switch (arrangement) {
+                     else => unreachable,
+                     .@"8b", .@"16b" => @as(u4, 0b1110) |
+                         @as(u4, @as(u0, @intCast(@shrExact(amount, 3)))) << 1,
+                     .@"4h", .@"8h" => @as(u4, 0b1000) |
+                         @as(u4, @as(u1, @intCast(@shrExact(amount, 3)))) << 1,
+                     .@"2s", .@"4s" => @as(u4, 0b0000) |
+                         @as(u4, @as(u2, @intCast(@shrExact(amount, 3)))) << 1,
+                 },
+                 .msl => |amount| switch (arrangement) {
+                     else => unreachable,
+                     // MSL only exists for 32-bit elements, amount 8 or 16.
+                     .@"2s", .@"4s" => @as(u4, 0b1100) |
+                         @as(u4, @as(u1, @intCast(@shrExact(amount, 3) - 1))) << 0,
+                 },
+                 .replicate => switch (arrangement) {
+                     else => unreachable,
+                     .@"1d", .@"2d" => 0b1110,
+                 },
+             },
+             .imm3 = @intCast(imm8 >> 5),
+             // op distinguishes the 64-bit replicate form from the
+             // shifted-immediate forms.
+             .op = switch (shift) {
+                 .lsl, .msl => 0b0,
+                 .replicate => 0b1,
+             },
+             .Q = arrangement.size(),
+         },
+     } } };
+ }
+ /// C6.2.225 MOVK
+ ///
+ /// Move 16-bit immediate into `d`, keeping the other bits, with the
+ /// immediate placed at the halfword position given by `shift.lsl`.
+ /// Shifts of 32/48 are only valid for doubleword destinations.
+ pub fn movk(
+     d: Register,
+     imm: u16,
+     shift: struct { lsl: DataProcessingImmediate.MoveWideImmediate.Hw = .@"0" },
+ ) Instruction {
+     const width = d.format.integer;
+     assert(width == .doubleword or shift.lsl.sf() == .word);
+     return .{ .data_processing_immediate = .{ .move_wide_immediate = .{
+         .movk = .{
+             .Rd = d.alias.encode(.{}),
+             .imm16 = imm,
+             .hw = shift.lsl,
+             .sf = width,
+         },
+     } } };
+ }
+ /// C6.2.226 MOVN
+ ///
+ /// Move inverted 16-bit immediate into `d`, with the immediate placed
+ /// at the halfword position given by `shift.lsl`. Shifts of 32/48 are
+ /// only valid for doubleword destinations.
+ pub fn movn(
+     d: Register,
+     imm: u16,
+     shift: struct { lsl: DataProcessingImmediate.MoveWideImmediate.Hw = .@"0" },
+ ) Instruction {
+     const width = d.format.integer;
+     assert(width == .doubleword or shift.lsl.sf() == .word);
+     return .{ .data_processing_immediate = .{ .move_wide_immediate = .{
+         .movn = .{
+             .Rd = d.alias.encode(.{}),
+             .imm16 = imm,
+             .hw = shift.lsl,
+             .sf = width,
+         },
+     } } };
+ }
+ /// C6.2.227 MOVZ
+ /// Move wide with zero, using the same shifted 16-bit immediate encoding and
+ /// shift-width check as `movk`.
+ pub fn movz(
+ d: Register,
+ imm: u16,
+ shift: struct { lsl: DataProcessingImmediate.MoveWideImmediate.Hw = .@"0" },
+ ) Instruction {
+ const sf = d.format.integer;
+ assert(sf == .doubleword or shift.lsl.sf() == .word);
+ return .{ .data_processing_immediate = .{ .move_wide_immediate = .{
+ .movz = .{
+ .Rd = d.alias.encode(.{}),
+ .imm16 = imm,
+ .hw = shift.lsl,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.228 MRS
+ /// Read a system register into a 64-bit general register. Only system
+ /// registers with op0 >= 0b10 are accepted here.
+ pub fn mrs(t: Register, systemreg: Register.System) Instruction {
+ assert(t.format.integer == .doubleword and systemreg.op0 >= 0b10);
+ return .{ .branch_exception_generating_system = .{ .system_register_move = .{
+ .mrs = .{
+ .Rt = t.alias.encode(.{}),
+ .systemreg = systemreg,
+ },
+ } } };
+ }
+ /// C6.2.230 MSR (register)
+ /// Write a 64-bit general register into a system register; mirrors `mrs`
+ /// with the same op0 >= 0b10 restriction.
+ pub fn msr(systemreg: Register.System, t: Register) Instruction {
+ assert(systemreg.op0 >= 0b10 and t.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .system_register_move = .{
+ .msr = .{
+ .Rt = t.alias.encode(.{}),
+ .systemreg = systemreg,
+ },
+ } } };
+ }
+ /// C6.2.231 MSUB
+ /// Multiply-subtract; all four registers must share the destination width.
+ pub fn msub(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf and a.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .msub = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.238 NOP
+ /// No-operation hint instruction.
+ pub fn nop() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .nop = .{},
+ } } };
+ }
+ /// C6.2.239 ORN (shifted register)
+ /// C7.2.211 ORN (vector)
+ /// Bitwise OR-NOT. The destination format selects the variant: integer
+ /// registers take the (possibly shifted) register forms, vector registers
+ /// take only the plain `.register` form.
+ pub fn orn(d: Register, n: Register, form: union(enum) {
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ // Labeled switch: the convenience forms re-dispatch (`continue :form`)
+ // to the canonical `.shifted_register_explicit` encoding.
+ form: switch (form) {
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .orn = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit operations only permit shift amounts 0-31 (u5 cast).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| {
+ // Vector ORN is bytewise: only byte-element arrangements are valid,
+ // and all three operands must share the arrangement.
+ const m = form.register;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .orn = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.240 ORR (immediate)
+ /// C6.2.241 ORR (shifted register)
+ /// C7.2.212 ORR (vector, immediate)
+ /// C7.2.213 ORR (vector, register)
+ /// Bitwise OR. Integer destinations accept bitmask-immediate and (shifted)
+ /// register forms; vector destinations accept a shifted 8-bit immediate
+ /// (half/word lanes) or a bytewise register form.
+ pub fn orr(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ shifted_immediate: struct { immediate: u8, lsl: u5 = 0 },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ // Convenience forms re-dispatch to the canonical explicit encoding.
+ form: switch (form) {
+ .immediate => |bitmask| {
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .orr = .{
+ // The immediate form allows SP as the destination.
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ // `.shifted_immediate` is vector-only.
+ .shifted_immediate => unreachable,
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .orr = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit operations only permit shift amounts 0-31 (u5 cast).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| switch (form) {
+ else => unreachable,
+ .shifted_immediate => |shifted_immediate| {
+ // Vector ORR (immediate) is destructive: source and destination
+ // must be the same register.
+ assert(n.alias == d.alias and n.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+ .orr = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ // imm8 is split across imm5 (low bits) and imm3 (high bits).
+ .imm5 = @truncate(shifted_immediate.immediate >> 0),
+ // cmode packs the lane size with lsl / 8; lsl must be a
+ // multiple of 8 (`@shrExact`).
+ .cmode = switch (arrangement) {
+ else => unreachable,
+ .@"4h", .@"8h" => @as(u3, 0b100) |
+ @as(u3, @as(u1, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ .@"2s", .@"4s" => @as(u3, 0b000) |
+ @as(u3, @as(u2, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ },
+ .imm3 = @intCast(shifted_immediate.immediate >> 5),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ .register => |m| {
+ // Vector ORR (register) is bytewise: byte-element arrangements only.
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .orr = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ },
+ }
+ }
+ /// C6.2.247 PRFM (immediate)
+ /// C6.2.248 PRFM (literal)
+ /// C6.2.249 PRFM (register)
+ /// Prefetch memory hint with the given prefetch operation. `form` selects
+ /// the addressing mode; convenience forms re-dispatch (`continue :form`)
+ /// to the explicit encodings.
+ pub fn prfm(prfop: LoadStore.PrfOp, form: union(enum) {
+ unsigned_offset: struct { base: Register, offset: u15 = 0 },
+ base: Register,
+ literal: i21,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Option,
+ amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Extend,
+ },
+ }) Instruction {
+ form: switch (form) {
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .prfm = .{
+ .prfop = prfop,
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // Offset is scaled by 8 and must be 8-byte aligned (`@shrExact`).
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, 3)),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ // Literal offset is PC-relative and must be word-aligned.
+ .literal => |offset| return .{ .load_store = .{ .register_literal = .{ .integer = .{
+ .prfm = .{
+ .prfop = prfop,
+ .imm19 = @intCast(@shrExact(offset, 2)),
+ },
+ } } } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .prfm = .{
+ .prfop = prfop,
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ // S selects index scaling: shift of 0 (unscaled) or 3 (by access size).
+ .S = switch (extended_register_explicit.amount) {
+ 0 => false,
+ 3 => true,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.253 RBIT
+ /// Reverse the bit order of a register; operand widths must match.
+ pub fn rbit(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rbit = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.254 RET
+ /// Return from subroutine via the given 64-bit register.
+ pub fn ret(n: Register) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_register = .{
+ .ret = .{ .Rn = n.alias.encode(.{}) },
+ } } };
+ }
+ /// C6.2.256 REV
+ /// Reverse the byte order of a register; operand widths must match.
+ pub fn rev(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rev = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // opc0 follows the register width (distinguishes REV from REV32).
+ .opc0 = sf,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.257 REV16
+ /// Reverse bytes within each 16-bit halfword; operand widths must match.
+ pub fn rev16(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rev16 = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.258 REV32
+ /// Reverse bytes within each 32-bit word; 64-bit registers only.
+ pub fn rev32(d: Register, n: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rev32 = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.263 RORV
+ /// Rotate right by a variable (register) amount; all widths must match.
+ pub fn rorv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .rorv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.264 SB
+ /// Speculation barrier.
+ pub fn sb() Instruction {
+ return .{ .branch_exception_generating_system = .{ .barriers = .{
+ .sb = .{},
+ } } };
+ }
+ /// C6.2.265 SBC
+ /// Subtract with carry; all register widths must match.
+ pub fn sbc(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .sbc = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.266 SBCS
+ /// Subtract with carry, setting flags; all register widths must match.
+ pub fn sbcs(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .sbcs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.268 SBFM
+ /// Signed bitfield move; `bitmask` must be a valid bitfield spec for the
+ /// register width (checked via `validBitfield`).
+ pub fn sbfm(d: Register, n: Register, bitmask: DataProcessingImmediate.Bitmask) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and bitmask.validBitfield(sf));
+ return .{ .data_processing_immediate = .{ .bitfield = .{
+ .sbfm = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.236 SCVTF (scalar, integer)
+ /// Signed integer to floating-point convert: integer source `n`,
+ /// half/single/double scalar destination `d`.
+ pub fn scvtf(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .scvtf = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ .ftype = switch (d.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = n.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.270 SDIV
+ /// Signed divide; all register widths must match.
+ pub fn sdiv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .sdiv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.280 SEV
+ /// Send event hint instruction.
+ pub fn sev() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .sev = .{},
+ } } };
+ }
+ /// C6.2.281 SEVL
+ /// Send event local hint instruction.
+ pub fn sevl() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .sevl = .{},
+ } } };
+ }
+ /// C6.2.282 SMADDL
+ /// Signed multiply-add long: 32-bit sources `n`/`m`, 64-bit accumulator `a`
+ /// and destination `d`.
+ pub fn smaddl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .smaddl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.283 SMC
+ /// Secure monitor call with a 16-bit immediate payload.
+ pub fn smc(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .smc = .{ .imm16 = imm },
+ } } };
+ }
+ /// C7.2.279 SMOV
+ /// Signed move of a vector element to a general register. Byte/half
+ /// elements may target either width; single elements require a 64-bit
+ /// destination.
+ pub fn smov(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ const vs = n.format.element.size;
+ switch (vs) {
+ else => unreachable,
+ .byte, .half => {},
+ .single => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .simd_copy = .{
+ .smov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ // imm5 packs the element index above a size marker bit pattern
+ // (0b1 = byte, 0b10 = half, 0b100 = single); the index cast width
+ // bounds the valid lane range per element size.
+ .imm5 = switch (vs) {
+ else => unreachable,
+ .byte => @as(u5, @as(u4, @intCast(n.format.element.index))) << 1 | @as(u5, 0b1) << 0,
+ .half => @as(u5, @as(u3, @intCast(n.format.element.index))) << 2 | @as(u5, 0b10) << 0,
+ .single => @as(u5, @as(u2, @intCast(n.format.element.index))) << 3 | @as(u5, 0b100) << 0,
+ },
+ .Q = sf,
+ },
+ } } };
+ }
+ /// C6.2.287 SMSUBL
+ /// Signed multiply-subtract long: 32-bit sources `n`/`m`, 64-bit
+ /// accumulator `a` and destination `d`.
+ pub fn smsubl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .smsubl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.288 SMULH
+ /// Signed multiply high: 64-bit operands only.
+ pub fn smulh(d: Register, n: Register, m: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .doubleword and m.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .smulh = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.321 STP
+ /// C7.2.330 STP (SIMD&FP)
+ /// Store pair of registers. The format of `t1` selects integer vs SIMD&FP
+ /// encodings; `form` selects post-index, pre-index, or signed-offset
+ /// addressing (`.base` is shorthand for a zero signed offset).
+ pub fn stp(t1: Register, t2: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i10 },
+ pre_index: struct { base: Register, index: i10 },
+ signed_offset: struct { base: Register, offset: i10 = 0 },
+ base: Register,
+ }) Instruction {
+ switch (t1.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(t2.format.integer == sf);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_post_indexed = .{ .integer = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ // imm7 is scaled by the register size: /4 for word, /8 for
+ // doubleword; `@shrExact` rejects misaligned offsets.
+ .imm7 = @intCast(@shrExact(post_index.index, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_pre_indexed = .{ .integer = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ .imm7 = @intCast(@shrExact(pre_index.index, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .signed_offset => |signed_offset| {
+ assert(signed_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_offset = .{ .integer = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ .imm7 = @intCast(@shrExact(signed_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+ }
+ },
+ .scalar => |vs| {
+ assert(t2.format.scalar == vs);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_post_indexed = .{ .vector = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ // SIMD&FP imm7 is scaled by the scalar element size.
+ .imm7 = @intCast(@shrExact(post_index.index, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .signed_offset => |signed_offset| {
+ assert(signed_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_offset = .{ .vector = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ .imm7 = @intCast(@shrExact(signed_offset.offset, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_pre_indexed = .{ .vector = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ .imm7 = @intCast(@shrExact(pre_index.index, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+ }
+ },
+ }
+ }
+ /// C6.2.322 STR (immediate)
+ /// C7.2.331 STR (immediate, SIMD&FP)
+ /// Store register. The format of `t` selects integer vs SIMD&FP encodings;
+ /// `form` selects post-index, pre-index, or scaled unsigned-offset
+ /// addressing (`.base` is shorthand for a zero unsigned offset).
+ pub fn str(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u16 = 0 },
+ base: Register,
+ }) Instruction {
+ switch (t.format) {
+ else => unreachable,
+ .integer => |sf| form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .str = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .sf = sf,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .str = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .sf = sf,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .str = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // imm12 is scaled by the register size: /4 for word, /8 for
+ // doubleword; `@shrExact` rejects misaligned offsets.
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ },
+ .scalar => |vs| form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .vector = .{
+ .str = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .vector = .{
+ .str = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .vector = .{
+ .str = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // SIMD&FP imm12 is scaled by the scalar element size.
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, @intFromEnum(vs))),
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ },
+ }
+ }
+ /// C6.2.324 STRB (immediate)
+ /// Store byte from the low 8 bits of a 32-bit register. `form` selects
+ /// post-index, pre-index, or unsigned-offset addressing (`.base` is
+ /// shorthand for a zero unsigned offset); byte accesses need no scaling.
+ pub fn strb(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u12 = 0 },
+ base: Register,
+ }) Instruction {
+ assert(t.format.integer == .word);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .strb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .strb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .strb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = unsigned_offset.offset,
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ }
+ }
+ /// C6.2.326 STRH (immediate)
+ /// Store halfword from the low 16 bits of a 32-bit register. Same
+ /// addressing forms as `strb`; the unsigned offset is scaled by 2 and must
+ /// be 2-byte aligned (`@shrExact`).
+ pub fn strh(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u13 = 0 },
+ base: Register,
+ }) Instruction {
+ assert(t.format.integer == .word);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .strh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .strh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .strh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, 1)),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ }
+ }
+ /// C6.2.346 STUR
+ /// C7.2.333 STUR (SIMD&FP)
+ /// Store register with an unscaled signed 9-bit offset; the format of `t`
+ /// selects the integer vs SIMD&FP encoding.
+ pub fn stur(t: Register, n: Register, simm: i9) Instruction {
+ assert(n.format.integer == .doubleword);
+ switch (t.format) {
+ else => unreachable,
+ .integer => |sf| return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .stur = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .sf = sf,
+ },
+ } } } },
+ .scalar => |vs| return .{ .load_store = .{ .register_unscaled_immediate = .{ .vector = .{
+ .stur = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } },
+ }
+ }
+ /// C6.2.347 STURB
+ /// Store byte with an unscaled signed 9-bit offset.
+ pub fn sturb(t: Register, n: Register, simm: i9) Instruction {
+ assert(t.format.integer == .word and n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .sturb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ },
+ } } } };
+ }
+ /// C6.2.348 STURH
+ /// Store halfword with an unscaled signed 9-bit offset.
+ pub fn sturh(t: Register, n: Register, simm: i9) Instruction {
+ assert(t.format.integer == .word and n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .sturh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ },
+ } } } };
+ }
+ /// C6.2.356 SUB (extended register)
+ /// C6.2.357 SUB (immediate)
+ /// C6.2.358 SUB (shifted register)
+ /// Subtract. Convenience forms re-dispatch (`continue :form`) to the
+ /// explicit encodings; the plain `.register` form picks extended-register
+ /// when SP is involved (the shifted form cannot encode SP).
+ pub fn sub(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .sub = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // Extended-register shift amount is limited to 0-4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .sub = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ // SP can only be encoded via the extended-register form, so route
+ // through it (with a width-matching zero extend) when SP appears.
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .sub = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit operations only permit shift amounts 0-31 (u5 cast).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ // ROR is not a valid shift for add/subtract.
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C6.2.362 SUBS (extended register)
+ /// C6.2.363 SUBS (immediate)
+ /// C6.2.364 SUBS (shifted register)
+ /// Subtract, setting flags. Mirrors `sub`, except the destination is never
+ /// encoded as SP (flag-setting forms use ZR in that slot).
+ pub fn subs(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .subs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // Extended-register shift amount is limited to 0-4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .subs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ // SP can only be encoded via the extended-register form, so route
+ // through it (with a width-matching zero extend) when SP appears.
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .subs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit operations only permit shift amounts 0-31 (u5 cast).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ // ROR is not a valid shift for add/subtract.
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C6.2.365 SVC
+ /// Supervisor call: encoded in the exception-generating group with a
+ /// 16-bit immediate payload available to the exception handler.
+ pub fn svc(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .svc = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.372 SYS
+ /// System instruction with source register `t`, which must be a 64-bit
+ /// (doubleword) register. op1/CRn/CRm/op2 select the system operation.
+ pub fn sys(op1: u3, n: u4, m: u4, op2: u3, t: Register) Instruction {
+ assert(t.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .system = .{
+ .sys = .{
+ .Rt = t.alias.encode(.{}),
+ .op2 = op2,
+ .CRm = m,
+ .CRn = n,
+ .op1 = op1,
+ },
+ } } };
+ }
+ /// C6.2.373 SYSL
+ /// System instruction with result: like `sys`, but `t` is the destination;
+ /// it must be a 64-bit (doubleword) register.
+ pub fn sysl(t: Register, op1: u3, n: u4, m: u4, op2: u3) Instruction {
+ assert(t.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .system = .{
+ .sysl = .{
+ .Rt = t.alias.encode(.{}),
+ .op2 = op2,
+ .CRm = m,
+ .CRn = n,
+ .op1 = op1,
+ },
+ } } };
+ }
+ /// C6.2.374 TBNZ
+ /// Test bit and branch if nonzero. `label` is a byte offset and must be a
+ /// multiple of 4 (`@shrExact` asserts this). The tested bit number `imm`
+ /// is split across the b5:b40 fields; for a word register the `u5`
+ /// `@intCast` asserts imm < 32.
+ pub fn tbnz(t: Register, imm: u6, label: i16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .test_branch_immediate = .{
+ .tbnz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm14 = @intCast(@shrExact(label, 2)),
+ .b40 = @truncate(switch (t.format.integer) {
+ .word => @as(u5, @intCast(imm)),
+ .doubleword => imm,
+ }),
+ .b5 = @intCast(imm >> 5),
+ },
+ } } };
+ }
+ /// C6.2.375 TBZ
+ /// Test bit and branch if zero. Same operand encoding as `tbnz`:
+ /// `label` is a byte offset that must be a multiple of 4, and the bit
+ /// number `imm` is split across b5:b40 (imm < 32 for word registers).
+ pub fn tbz(t: Register, imm: u6, label: i16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .test_branch_immediate = .{
+ .tbz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm14 = @intCast(@shrExact(label, 2)),
+ .b40 = @truncate(switch (t.format.integer) {
+ .word => @as(u5, @intCast(imm)),
+ .doubleword => imm,
+ }),
+ .b5 = @intCast(imm >> 5),
+ },
+ } } };
+ }
+ /// C6.2.376 TCANCEL
+ /// Transaction cancel: exception-generating encoding carrying a 16-bit
+ /// immediate.
+ pub fn tcancel(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .tcancel = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.385 UBFM
+ /// Unsigned bitfield move. `d` and `n` must share the same integer width,
+ /// and `bitmask` must describe a bitfield valid for that width
+ /// (checked via `bitmask.validBitfield(sf)`).
+ pub fn ubfm(d: Register, n: Register, bitmask: DataProcessingImmediate.Bitmask) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and bitmask.validBitfield(sf));
+ return .{ .data_processing_immediate = .{ .bitfield = .{
+ .ubfm = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.355 UCVTF (scalar, integer)
+ /// Unsigned integer to floating-point convert. The float size (`ftype`)
+ /// comes from the scalar format of destination `d` (half/single/double
+ /// only; anything else is unreachable), and the source width `sf` from
+ /// the integer format of `n`.
+ pub fn ucvtf(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .ucvtf = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ .ftype = switch (d.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = n.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.387 UDF
+ /// Permanently undefined: lives in the reserved encoding space and carries
+ /// a 16-bit immediate payload.
+ pub fn udf(imm: u16) Instruction {
+ return .{ .reserved = .{
+ .udf = .{ .imm16 = imm },
+ } };
+ }
+ /// C6.2.388 UDIV
+ /// Unsigned divide. All three registers must share the same integer width.
+ pub fn udiv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .udiv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.389 UMADDL
+ /// Unsigned multiply-add long: `d` and addend `a` must be doubleword,
+ /// multiplicands `n` and `m` must be word registers (asserted).
+ pub fn umaddl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .umaddl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.391 UMSUBL
+ /// Unsigned multiply-subtract long: same operand-width contract as
+ /// `umaddl` — `d`/`a` doubleword, `n`/`m` word (asserted).
+ pub fn umsubl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .umsubl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C7.2.371 UMOV
+ /// Unsigned move vector element to general-purpose register. The element
+ /// size of `n` dictates the destination width: byte/half/single require a
+ /// word register, double requires a doubleword register (asserted).
+ /// `imm5` packs the element index above a one-hot size marker
+ /// (0b1 = byte, 0b10 = half, 0b100 = single, 0b1000 = double); the
+ /// index `@intCast`s assert the index fits the lane count for that size.
+ pub fn umov(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ const vs = n.format.element.size;
+ switch (vs) {
+ else => unreachable,
+ .byte, .half, .single => assert(sf == .word),
+ .double => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .simd_copy = .{
+ .umov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .imm5 = switch (vs) {
+ else => unreachable,
+ .byte => @as(u5, @as(u4, @intCast(n.format.element.index))) << 1 | @as(u5, 0b1) << 0,
+ .half => @as(u5, @as(u3, @intCast(n.format.element.index))) << 2 | @as(u5, 0b10) << 0,
+ .single => @as(u5, @as(u2, @intCast(n.format.element.index))) << 3 | @as(u5, 0b100) << 0,
+ .double => @as(u5, @as(u1, @intCast(n.format.element.index))) << 4 | @as(u5, 0b1000) << 0,
+ },
+ .Q = sf,
+ },
+ } } };
+ }
+ /// C6.2.392 UMULH
+ /// Unsigned multiply high. All three registers must be doubleword
+ /// (asserted).
+ pub fn umulh(d: Register, n: Register, m: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .doubleword and m.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .umulh = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.396 WFE
+ /// Wait for event: operand-free hint-group encoding.
+ pub fn wfe() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .wfe = .{},
+ } } };
+ }
+ /// C6.2.398 WFI
+ /// Wait for interrupt: operand-free hint-group encoding.
+ pub fn wfi() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .wfi = .{},
+ } } };
+ }
+ /// C6.2.402 YIELD
+ /// Yield hint: operand-free hint-group encoding.
+ pub fn yield() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .yield = .{},
+ } } };
+ }
+
+ /// Byte size of one encoded instruction (derived from the fixed-width
+ /// `Backing` integer, so always 4).
+ pub const size = @divExact(@bitSizeOf(Backing), 8);
+ /// Raw 32-bit representation that every instruction encoding bit-casts
+ /// to and from.
+ pub const Backing = u32;
+ /// Decodes one instruction from `mem`, interpreted as little-endian.
+ pub fn read(mem: *const [size]u8) Instruction {
+ return @bitCast(std.mem.readInt(Backing, mem, .little));
+ }
+ /// Encodes `inst` into `mem` as little-endian.
+ pub fn write(inst: Instruction, mem: *[size]u8) void {
+ std.mem.writeInt(Backing, mem, @bitCast(inst), .little);
+ }
+
+ /// Standard formatter: renders `inst` by delegating to the
+ /// `aarch64.Disassemble` printer with default disassembly options.
+ pub fn format(inst: Instruction, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ const dis: aarch64.Disassemble = .{};
+ try dis.printInstruction(inst, writer);
+ }
+
+ comptime {
+ // Quota raised because verify recurses over every encoding type.
+ @setEvalBranchQuota(68_000);
+ verify(@typeName(Instruction), Instruction);
+ }
+ /// Compile-time validation of the encoding type tree: every union/struct
+ /// must be packed and exactly `Backing` (u32) wide, and any field named
+ /// `encodedN`/`decodedN` must state its true bit offset N within the word.
+ /// `encoded*` fields must have no default value; `decoded*` fields must
+ /// have one — mismatches report the name the field should have.
+ fn verify(name: []const u8, Type: type) void {
+ switch (@typeInfo(Type)) {
+ .@"union" => |info| {
+ if (info.layout != .@"packed" or @bitSizeOf(Type) != @bitSizeOf(Backing)) {
+ @compileLog(name ++ " should have u32 abi");
+ }
+ // Recurse into every union variant, extending the diagnostic path.
+ for (info.fields) |field| verify(name ++ "." ++ field.name, field.type);
+ },
+ .@"struct" => |info| {
+ if (info.layout != .@"packed" or info.backing_integer != Backing) {
+ @compileLog(name ++ " should have u32 abi");
+ }
+ // Running bit offset of each field within the packed u32.
+ var bit_offset = 0;
+ for (info.fields) |field| {
+ if (std.mem.startsWith(u8, field.name, "encoded")) {
+ if (if (std.fmt.parseInt(u5, field.name["encoded".len..], 10)) |encoded_bit_offset| encoded_bit_offset != bit_offset else |_| true) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named encoded{d}", .{ name, field.name, bit_offset }));
+ }
+ if (field.default_value_ptr != null) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named decoded{d}", .{ name, field.name, bit_offset }));
+ }
+ } else if (std.mem.startsWith(u8, field.name, "decoded")) {
+ if (if (std.fmt.parseInt(u5, field.name["decoded".len..], 10)) |decoded_bit_offset| decoded_bit_offset != bit_offset else |_| true) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named decoded{d}", .{ name, field.name, bit_offset }));
+ }
+ if (field.default_value_ptr == null) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named encoded{d}", .{ name, field.name, bit_offset }));
+ }
+ }
+ bit_offset += @bitSizeOf(field.type);
+ }
+ },
+ else => @compileError(name ++ " has an unexpected field type"),
+ }
+ }
+};
+
+const aarch64 = @import("../aarch64.zig");
+const assert = std.debug.assert;
+const std = @import("std");
diff --git a/src/codegen/aarch64/instructions.zon b/src/codegen/aarch64/instructions.zon
new file mode 100644
index 0000000000..48b8eaa21f
--- /dev/null
+++ b/src/codegen/aarch64/instructions.zon
@@ -0,0 +1,1543 @@
+.{
+ // C6.2.3 ADD (extended register)
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, <Xm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .extend = .{ .extend = .{ .size = .doubleword } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Xm, .option = .extend, .amount = .amount } } },
+ },
+ // C6.2.4 ADD (immediate)
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ // C6.2.5 ADD (shifted register)
+ .{
+ .pattern = "ADD <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ADD <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ADD <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ADD <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.13 AND (shifted register)
+ .{
+ .pattern = "AND <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .@"and", .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "AND <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .@"and", .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "AND <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .@"and", .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "AND <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .@"and", .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.15 ANDS (shifted register)
+ .{
+ .pattern = "ANDS <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .ands, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ANDS <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .ands, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ANDS <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .ands, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ANDS <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .ands, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.16 ASR (register)
+ .{
+ .pattern = "ASR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .asrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "ASR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .asrv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.17 ASR (immediate)
+ .{
+ .pattern = "ASR <Wd>, <Wn>, #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sbfm, .Wd, .Wn, .{ .N = .word, .immr = .shift, .imms = 31 } },
+ },
+ .{
+ .pattern = "ASR <Xd>, <Xn>, #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sbfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .shift, .imms = 63 } },
+ },
+ // C6.2.18 ASRV
+ .{
+ .pattern = "ASRV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .asrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "ASRV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .asrv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.35 BLR
+ .{
+ .pattern = "BLR <Xn>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .blr, .Xn },
+ },
+ // C6.2.30 BFM
+ .{
+ .pattern = "BFM <Wd>, <Wn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .bfm, .Wd, .Wn, .{ .N = .word, .immr = .immr, .imms = .imms } },
+ },
+ .{
+ .pattern = "BFM <Xd>, <Xn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .bfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .immr, .imms = .imms } },
+ },
+ // C6.2.37 BR
+ .{
+ .pattern = "BR <Xn>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .br, .Xn },
+ },
+ // C6.2.40 BRK
+ .{
+ .pattern = "BRK #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .brk, .imm },
+ },
+ // C6.2.56 CLREX
+ .{
+ .pattern = "CLREX",
+ .symbols = .{},
+ .encode = .{ .clrex, 0b1111 },
+ },
+ .{
+ .pattern = "CLREX #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 } } },
+ },
+ .encode = .{ .clrex, .imm },
+ },
+ // C6.2.109 DC
+ .{
+ .pattern = "DC IVAC, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0110, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC ISW, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0110, 0b010, .Xt },
+ },
+ .{
+ .pattern = "DC CSW, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b1010, 0b010, .Xt },
+ },
+ .{
+ .pattern = "DC CISW, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b1110, 0b010, .Xt },
+ },
+ .{
+ .pattern = "DC ZVA, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b0100, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC CVAC, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b1010, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC CVAU, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b1011, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC CIVAC, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b1110, 0b001, .Xt },
+ },
+ // C6.2.110 DCPS1
+ .{
+ .pattern = "DCPS1",
+ .symbols = .{},
+ .encode = .{ .dcps1, 0 },
+ },
+ .{
+ .pattern = "DCPS1 #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .dcps1, .imm },
+ },
+ // C6.2.111 DCPS2
+ .{
+ .pattern = "DCPS2",
+ .symbols = .{},
+ .encode = .{ .dcps2, 0 },
+ },
+ .{
+ .pattern = "DCPS2 #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .dcps2, .imm },
+ },
+ // C6.2.112 DCPS3
+ .{
+ .pattern = "DCPS3",
+ .symbols = .{},
+ .encode = .{ .dcps3, 0 },
+ },
+ .{
+ .pattern = "DCPS3 #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .dcps3, .imm },
+ },
+ // C6.2.116 DSB
+ .{
+ .pattern = "DSB <option>",
+ .symbols = .{
+ .option = .{ .barrier = .{} },
+ },
+ .encode = .{ .dsb, .option },
+ },
+ .{
+ .pattern = "DSB #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 } } },
+ },
+ .encode = .{ .dsb, .imm },
+ },
+ // C6.2.120 EOR (shifted register)
+ .{
+ .pattern = "EOR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .eor, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "EOR <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .eor, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "EOR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .eor, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "EOR <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .eor, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.124 EXTR
+ .{
+ .pattern = "EXTR <Wd>, <Wn>, <Wm>, #<lsb>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .lsb = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .extr, .Wd, .Wn, .Wm, .lsb },
+ },
+ .{
+ .pattern = "EXTR <Xd>, <Xn>, <Xm>, #<lsb>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .lsb = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .extr, .Xd, .Xn, .Xm, .lsb },
+ },
+ // C6.2.126 HINT
+ .{
+ .pattern = "HINT #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 7 } } },
+ },
+ .encode = .{ .hint, .imm },
+ },
+ // C6.2.127 HLT
+ .{
+ .pattern = "HLT #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .hlt, .imm },
+ },
+ // C6.2.128 HVC
+ .{
+ .pattern = "HVC #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .hvc, .imm },
+ },
+ // C6.2.129 IC
+ .{
+ .pattern = "IC IALLUIS",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0001, 0b000, .xzr },
+ },
+ .{
+ .pattern = "IC IALLUIS, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0001, 0b000, .Xt },
+ },
+ .{
+ .pattern = "IC IALLU",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0101, 0b000, .xzr },
+ },
+ .{
+ .pattern = "IC IALLU, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0101, 0b000, .Xt },
+ },
+ .{
+ .pattern = "IC IVAU",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b0101, 0b001, .xzr },
+ },
+ .{
+ .pattern = "IC IVAU, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b0101, 0b001, .Xt },
+ },
+ // C6.2.131 ISB
+ .{
+ .pattern = "ISB",
+ .symbols = .{},
+ .encode = .{ .isb, .sy },
+ },
+ .{
+ .pattern = "ISB <option>",
+ .symbols = .{
+ .option = .{ .barrier = .{ .only_sy = true } },
+ },
+ .encode = .{ .isb, .option },
+ },
+ .{
+ .pattern = "ISB #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 } } },
+ },
+ .encode = .{ .isb, .imm },
+ },
+ // C6.2.164 LDP
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ // C6.2.166 LDR (immediate)
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 14 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 15 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ // C6.2.212 LSL (register)
+ .{
+ .pattern = "LSL <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lslv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSL <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lslv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.214 LSLV
+ .{
+ .pattern = "LSLV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lslv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSLV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lslv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.215 LSR (register)
+ .{
+ .pattern = "LSR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lsrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lsrv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.217 LSRV
+ .{
+ .pattern = "LSRV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lsrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSRV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lsrv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.220 MOV (to/from SP)
+ .{
+ .pattern = "MOV WSP, <Wn|WSP>",
+ .symbols = .{
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .wsp, .Wn, .{ .immediate = 0 } },
+ },
+ .{
+ .pattern = "MOV <Wd|WSP>, WSP",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .Wd, .wsp, .{ .immediate = 0 } },
+ },
+ .{
+ .pattern = "MOV SP, <Xn|SP>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .sp, .Xn, .{ .immediate = 0 } },
+ },
+ .{
+ .pattern = "MOV <Xd|SP>, SP",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .Xd, .sp, .{ .immediate = 0 } },
+ },
+ // C6.2.222 MOV (wide immediate)
+ .{
+ .pattern = "MOV <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Wd, .imm, .{ .lsl = .@"0" } },
+ },
+ .{
+ .pattern = "MOV <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Xd, .imm, .{ .lsl = .@"0" } },
+ },
+ // C6.2.224 MOV (register)
+ .{
+ .pattern = "MOV <Wd>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .orr, .Wd, .wzr, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "MOV <Xd>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .orr, .Xd, .xzr, .{ .register = .Xm } },
+ },
+ // C6.2.225 MOVK
+ .{
+ .pattern = "MOVK <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movk, .Wd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVK <Wd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movk, .Wd, .imm, .{ .lsl = .shift } },
+ },
+ .{
+ .pattern = "MOVK <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movk, .Xd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVK <Xd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movk, .Xd, .imm, .{ .lsl = .shift } },
+ },
+ // C6.2.226 MOVN
+ .{
+ .pattern = "MOVN <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movn, .Wd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVN <Wd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movn, .Wd, .imm, .{ .lsl = .shift } },
+ },
+ .{
+ .pattern = "MOVN <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movn, .Xd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVN <Xd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movn, .Xd, .imm, .{ .lsl = .shift } },
+ },
+ // C6.2.227 MOVZ
+ .{
+ .pattern = "MOVZ <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Wd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVZ <Wd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movz, .Wd, .imm, .{ .lsl = .shift } },
+ },
+ .{
+ .pattern = "MOVZ <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Xd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVZ <Xd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movz, .Xd, .imm, .{ .lsl = .shift } },
+ },
+ // C6.2.228 MRS
+ .{
+ .pattern = "MRS <Xt>, <systemreg>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .systemreg = .systemreg,
+ },
+ .encode = .{ .mrs, .Xt, .systemreg },
+ },
+ // C6.2.230 MSR (register)
+ .{
+ .pattern = "MSR <systemreg>, <Xt>",
+ .symbols = .{
+ .systemreg = .systemreg,
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .msr, .systemreg, .Xt },
+ },
+ // C6.2.234 NEG
+ .{
+ .pattern = "NEG <Wd>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .sub, .Wd, .wzr, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "NEG <Wd>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sub, .Wd, .wzr, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "NEG <Xd>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sub, .Xd, .xzr, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "NEG <Xd>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sub, .Xd, .xzr, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.238 NOP
+ .{
+ .pattern = "NOP",
+ .symbols = .{},
+ .encode = .{.nop},
+ },
+ // C6.2.241 ORR (shifted register)
+ .{
+ .pattern = "ORR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .orr, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ORR <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .orr, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ORR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .orr, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ORR <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .orr, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.254 RET
+ .{
+ .pattern = "RET",
+ .symbols = .{},
+ .encode = .{ .ret, .x30 },
+ },
+ .{
+ .pattern = "RET <Xn>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .ret, .Xn },
+ },
+ // C6.2.261 ROR (immediate)
+ .{
+ .pattern = "ROR <Wd>, <Ws>, #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Ws = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .extr, .Wd, .Ws, .Ws, .shift },
+ },
+ .{
+ .pattern = "ROR <Xd>, <Xs>, #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xs = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .extr, .Xd, .Xs, .Xs, .shift },
+ },
+ // C6.2.262 ROR (register)
+ .{
+ .pattern = "ROR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .rorv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "ROR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .rorv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.263 RORV
+ .{
+ .pattern = "RORV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .rorv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "RORV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .rorv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.268 SBFM
+ .{
+ .pattern = "SBFM <Wd>, <Wn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sbfm, .Wd, .Wn, .{ .N = .word, .immr = .immr, .imms = .imms } },
+ },
+ .{
+ .pattern = "SBFM <Xd>, <Xn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sbfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .immr, .imms = .imms } },
+ },
+ // C6.2.280 SEV
+ .{
+ .pattern = "SEV",
+ .symbols = .{},
+ .encode = .{.sev},
+ },
+ // C6.2.281 SEVL
+ .{
+ .pattern = "SEVL",
+ .symbols = .{},
+ .encode = .{.sevl},
+ },
+ // C6.2.283 SMC
+ .{
+ .pattern = "SMC #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .smc, .imm },
+ },
+ // C6.2.321 STP
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ // C6.2.322 STR (immediate)
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Wt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Xt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Wt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Xt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .str, .Wt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 14 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .str, .Wt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .str, .Xt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 15 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .str, .Xt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ // C6.2.356 SUB (extended register)
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, <Xm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .extend = .{ .extend = .{ .size = .doubleword } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Xm, .option = .extend, .amount = .amount } } },
+ },
+ // C6.2.357 SUB (immediate)
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ // C6.2.358 SUB (shifted register)
+ .{
+ .pattern = "SUB <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "SUB <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "SUB <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "SUB <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.365 SVC
+ .{
+ .pattern = "SVC #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .svc, .imm },
+ },
+ // C6.2.376 TCANCEL
+ .{
+ .pattern = "TCANCEL #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .tcancel, .imm },
+ },
+ // C6.2.385 UBFM
+ .{
+ .pattern = "UBFM <Wd>, <Wn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .ubfm, .Wd, .Wn, .{ .N = .word, .immr = .immr, .imms = .imms } },
+ },
+ .{
+ .pattern = "UBFM <Xd>, <Xn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .ubfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .immr, .imms = .imms } },
+ },
+ // C6.2.387 UDF
+ .{
+ .pattern = "UDF #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .udf, .imm },
+ },
+ // C6.2.396 WFE
+ .{
+ .pattern = "WFE",
+ .symbols = .{},
+ .encode = .{.wfe},
+ },
+ // C6.2.398 WFI
+ .{
+ .pattern = "WFI",
+ .symbols = .{},
+ .encode = .{.wfi},
+ },
+ // C6.2.402 YIELD
+ .{
+ .pattern = "YIELD",
+ .symbols = .{},
+ .encode = .{.yield},
+ },
+}
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index c726c05e1b..832c8b2ea5 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -449,14 +449,15 @@ pub const Function = struct {
if (gop.found_existing) return gop.value_ptr.*;
const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const val = (try f.air.value(ref, pt)).?;
const ty = f.typeOf(ref);
- const result: CValue = if (lowersToArray(ty, pt)) result: {
+ const result: CValue = if (lowersToArray(ty, zcu)) result: {
const ch = &f.object.code_header.writer;
const decl_c_value = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)),
});
const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -916,7 +917,7 @@ pub const DeclGen = struct {
// Ensure complete type definition is available before accessing fields.
_ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete);
- switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, pt)) {
+ switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) {
.begin => {
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
try w.writeByte('(');
@@ -3008,7 +3009,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) @import("../codegen.zig").CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -3021,7 +3022,7 @@ pub fn generate(
var function: Function = .{
.value_map = .init(gpa),
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.func_index = func_index,
.object = .{
.dg = .{
@@ -3961,7 +3962,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
- const is_array = lowersToArray(src_ty, pt);
+ const is_array = lowersToArray(src_ty, zcu);
const need_memcpy = !is_aligned or is_array;
const w = &f.object.code.writer;
@@ -4044,7 +4045,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !void {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
var deref = is_ptr;
- const is_array = lowersToArray(ret_ty, pt);
+ const is_array = lowersToArray(ret_ty, zcu);
const ret_val = if (is_array) ret_val: {
const array_local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype,
@@ -4228,7 +4229,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
- const is_array = lowersToArray(.fromInterned(ptr_info.child), pt);
+ const is_array = lowersToArray(.fromInterned(ptr_info.child), zcu);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@@ -4873,7 +4874,7 @@ fn airCall(
}
const result = result: {
- if (result_local == .none or !lowersToArray(ret_ty, pt))
+ if (result_local == .none or !lowersToArray(ret_ty, zcu))
break :result result_local;
const array_local = try f.allocLocal(inst, ret_ty);
@@ -5971,13 +5972,12 @@ fn fieldLocation(
container_ptr_ty: Type,
field_ptr_ty: Type,
field_index: u32,
- pt: Zcu.PerThread,
+ zcu: *Zcu,
) union(enum) {
begin: void,
field: CValue,
byte_offset: u64,
} {
- const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const container_ty: Type = .fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child);
switch (ip.indexToKey(container_ty.toIntern())) {
@@ -5994,7 +5994,7 @@ fn fieldLocation(
else
.{ .field = field_index } },
.@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
- .{ .byte_offset = @divExact(pt.structPackedFieldBitOffset(loaded_struct, field_index) +
+ .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) +
container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) }
else
.begin,
@@ -6076,7 +6076,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(w, container_ptr_ty);
try w.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) {
.begin => try f.writeCValue(w, field_ptr_val, .Other),
.field => |field| {
const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, .u8);
@@ -6131,7 +6131,7 @@ fn fieldPtr(
try f.renderType(w, field_ptr_ty);
try w.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) {
.begin => try f.writeCValue(w, container_ptr_val, .Other),
.field => |field| {
try w.writeByte('&');
@@ -6189,7 +6189,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- const bit_offset = pt.structPackedFieldBitOffset(loaded_struct, extra.field_index);
+ const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index);
const field_int_signedness = if (inst_ty.isAbiInt(zcu))
inst_ty.intInfo(zcu).signedness
@@ -8573,8 +8573,7 @@ const Vectorize = struct {
}
};
-fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
- const zcu = pt.zcu;
+fn lowersToArray(ty: Type, zcu: *Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.array, .vector => return true,
else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index a570dd5ec0..111fc6ec14 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -20,6 +20,7 @@ const Package = @import("../Package.zig");
const Air = @import("../Air.zig");
const Value = @import("../Value.zig");
const Type = @import("../Type.zig");
+const codegen = @import("../codegen.zig");
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("wasm/abi.zig");
const aarch64_c_abi = @import("aarch64/abi.zig");
@@ -1131,7 +1132,7 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) !void {
const zcu = pt.zcu;
const comp = zcu.comp;
@@ -1489,7 +1490,7 @@ pub const Object = struct {
var fg: FuncGen = .{
.gpa = gpa,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .naked,
@@ -4210,7 +4211,7 @@ pub const Object = struct {
.eu_payload => |eu_ptr| try o.lowerPtr(
pt,
eu_ptr,
- offset + @import("../codegen.zig").errUnionPayloadOffset(
+ offset + codegen.errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
zcu,
),
@@ -6050,10 +6051,10 @@ pub const FuncGen = struct {
const target_blocks = dispatch_info.case_blocks[0..target_blocks_len];
// Make sure to cast the index to a usize so it's not treated as negative!
- const table_index = try self.wip.cast(
- .zext,
+ const table_index = try self.wip.conv(
+ .unsigned,
try self.wip.bin(.@"sub nuw", cond, jmp_table.min.toValue(), ""),
- try o.lowerType(pt, Type.usize),
+ try o.lowerType(pt, .usize),
"",
);
const target_ptr_ptr = try self.wip.gep(
@@ -6969,7 +6970,7 @@ pub const FuncGen = struct {
.@"struct" => switch (struct_ty.containerLayout(zcu)) {
.@"packed" => {
const struct_type = zcu.typeToStruct(struct_ty).?;
- const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index);
+ const bit_offset = zcu.structPackedFieldBitOffset(struct_type, field_index);
const containing_int = struct_llvm_val;
const shift_amt =
try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
@@ -11364,7 +11365,7 @@ pub const FuncGen = struct {
// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
- const byte_offset = @divExact(pt.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
+ const byte_offset = @divExact(zcu.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
if (byte_offset == 0) return struct_ptr;
const usize_ty = try o.lowerType(pt, Type.usize);
const llvm_index = try o.builder.intValue(usize_ty, byte_offset);
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 292f5a62fc..17fa62d18f 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -251,11 +251,11 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) !void {
const nav = pt.zcu.funcInfo(func_index).owner_nav;
// TODO: Separate types for generating decls and functions?
- try self.genNav(pt, nav, air.*, liveness.*, true);
+ try self.genNav(pt, nav, air.*, liveness.*.?, true);
}
pub fn updateNav(
@@ -5134,7 +5134,7 @@ const NavGen = struct {
.@"struct" => switch (object_ty.containerLayout(zcu)) {
.@"packed" => {
const struct_ty = zcu.typeToPackedStruct(object_ty).?;
- const bit_offset = pt.structPackedFieldBitOffset(struct_ty, field_index);
+ const bit_offset = zcu.structPackedFieldBitOffset(struct_ty, field_index);
const bit_offset_id = try self.constInt(.u16, bit_offset);
const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
diff --git a/src/dev.zig b/src/dev.zig
index 4c602621ec..bf2d957c0d 100644
--- a/src/dev.zig
+++ b/src/dev.zig
@@ -25,14 +25,14 @@ pub const Env = enum {
/// - `zig build-* -fno-emit-bin`
sema,
+ /// - sema
+ /// - `zig build-* -fincremental -fno-llvm -fno-lld -target aarch64-linux --listen=-`
+ @"aarch64-linux",
+
/// - `zig build-* -ofmt=c`
cbe,
/// - sema
- /// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
- @"x86_64-linux",
-
- /// - sema
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target powerpc(64)(le)-linux --listen=-`
@"powerpc-linux",
@@ -48,6 +48,10 @@ pub const Env = enum {
/// - `zig build-* -fno-llvm -fno-lld -target wasm32-* --listen=-`
wasm,
+ /// - sema
+ /// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
+ @"x86_64-linux",
+
pub inline fn supports(comptime dev_env: Env, comptime feature: Feature) bool {
return switch (dev_env) {
.full => true,
@@ -153,23 +157,22 @@ pub const Env = enum {
=> true,
else => Env.ast_gen.supports(feature),
},
- .cbe => switch (feature) {
- .legalize,
- .c_backend,
- .c_linker,
- => true,
- else => Env.sema.supports(feature),
- },
- .@"x86_64-linux" => switch (feature) {
+ .@"aarch64-linux" => switch (feature) {
.build_command,
.stdio_listen,
.incremental,
- .legalize,
- .x86_64_backend,
+ .aarch64_backend,
.elf_linker,
=> true,
else => Env.sema.supports(feature),
},
+ .cbe => switch (feature) {
+ .legalize,
+ .c_backend,
+ .c_linker,
+ => true,
+ else => Env.sema.supports(feature),
+ },
.@"powerpc-linux" => switch (feature) {
.build_command,
.stdio_listen,
@@ -199,6 +202,16 @@ pub const Env = enum {
=> true,
else => Env.sema.supports(feature),
},
+ .@"x86_64-linux" => switch (feature) {
+ .build_command,
+ .stdio_listen,
+ .incremental,
+ .legalize,
+ .x86_64_backend,
+ .elf_linker,
+ => true,
+ else => Env.sema.supports(feature),
+ },
};
}
diff --git a/src/fmt.zig b/src/fmt.zig
index 23e668d245..92ae22e4bc 100644
--- a/src/fmt.zig
+++ b/src/fmt.zig
@@ -348,10 +348,10 @@ fn fmtPathFile(
try fmt.stdout_writer.interface.print("{s}\n", .{file_path});
fmt.any_error = true;
} else {
- var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode });
+ var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode, .write_buffer = &.{} });
defer af.deinit();
- try af.file.writeAll(fmt.out_buffer.getWritten());
+ try af.file_writer.interface.writeAll(fmt.out_buffer.getWritten());
try af.finish();
try fmt.stdout_writer.interface.print("{s}\n", .{file_path});
}
diff --git a/src/link.zig b/src/link.zig
index 0fbd4b28cf..51bb9330f1 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -23,6 +23,7 @@ const dev = @import("dev.zig");
const target_util = @import("target.zig");
const codegen = @import("codegen.zig");
+pub const aarch64 = @import("link/aarch64.zig");
pub const LdScript = @import("link/LdScript.zig");
pub const Queue = @import("link/Queue.zig");
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index dd8ddd30f4..1b8986d337 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1335,9 +1335,13 @@ fn updateNavCode(
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
- const required_alignment = switch (pt.navAlignment(nav_index)) {
- .none => target_util.defaultFunctionAlignment(target),
+ const mod = zcu.navFileScope(nav_index).mod.?;
+ const target = &mod.resolved_target.result;
+ const required_alignment = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
@@ -2832,58 +2836,33 @@ pub const Relocation = struct {
};
fn resolveAarch64(reloc: Relocation, ctx: Context) void {
+ const Instruction = aarch64_util.encoding.Instruction;
var buffer = ctx.code[reloc.offset..];
switch (reloc.type) {
.got_page, .import_page, .page => {
const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
- const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
- var inst = aarch64_util.Instruction{
- .pc_relative_address = mem.bytesToValue(@FieldType(
- aarch64_util.Instruction,
- @tagName(aarch64_util.Instruction.pc_relative_address),
- ), buffer[0..4]),
- };
- inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
- inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
- mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
+ const pages: i21 = @intCast(target_page - source_page);
+ var inst: Instruction = .read(buffer[0..Instruction.size]);
+ inst.data_processing_immediate.pc_relative_addressing.group.immhi = @intCast(pages >> 2);
+ inst.data_processing_immediate.pc_relative_addressing.group.immlo = @truncate(@as(u21, @bitCast(pages)));
+ inst.write(buffer[0..Instruction.size]);
},
.got_pageoff, .import_pageoff, .pageoff => {
assert(!reloc.pcrel);
- const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr))));
- if (isArithmeticOp(buffer[0..4])) {
- var inst = aarch64_util.Instruction{
- .add_subtract_immediate = mem.bytesToValue(@FieldType(
- aarch64_util.Instruction,
- @tagName(aarch64_util.Instruction.add_subtract_immediate),
- ), buffer[0..4]),
- };
- inst.add_subtract_immediate.imm12 = narrowed;
- mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
- } else {
- var inst = aarch64_util.Instruction{
- .load_store_register = mem.bytesToValue(@FieldType(
- aarch64_util.Instruction,
- @tagName(aarch64_util.Instruction.load_store_register),
- ), buffer[0..4]),
- };
- const offset: u12 = blk: {
- if (inst.load_store_register.size == 0) {
- if (inst.load_store_register.v == 1) {
- // 128-bit SIMD is scaled by 16.
- break :blk @divExact(narrowed, 16);
- }
- // Otherwise, 8-bit SIMD or ldrb.
- break :blk narrowed;
- } else {
- const denom: u4 = math.powi(u4, 2, inst.load_store_register.size) catch unreachable;
- break :blk @divExact(narrowed, denom);
- }
- };
- inst.load_store_register.offset = offset;
- mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
+ const narrowed: u12 = @truncate(@as(u64, @intCast(ctx.target_vaddr)));
+ var inst: Instruction = .read(buffer[0..Instruction.size]);
+ switch (inst.decode()) {
+ else => unreachable,
+ .data_processing_immediate => inst.data_processing_immediate.add_subtract_immediate.group.imm12 = narrowed,
+ .load_store => |load_store| inst.load_store.register_unsigned_immediate.group.imm12 =
+ switch (load_store.register_unsigned_immediate.decode()) {
+ .integer => |integer| @shrExact(narrowed, @intFromEnum(integer.group.size)),
+ .vector => |vector| @shrExact(narrowed, @intFromEnum(vector.group.opc1.decode(vector.group.size))),
+ },
}
+ inst.write(buffer[0..Instruction.size]);
},
.direct => {
assert(!reloc.pcrel);
@@ -2934,11 +2913,6 @@ pub const Relocation = struct {
},
}
}
-
- fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @as(u5, @truncate(inst[3]));
- return ((group_decode >> 2) == 4);
- }
};
pub fn addRelocation(coff: *Coff, atom_index: Atom.Index, reloc: Relocation) !void {
@@ -3112,7 +3086,7 @@ const Path = std.Build.Cache.Path;
const Directory = std.Build.Cache.Directory;
const Cache = std.Build.Cache;
-const aarch64_util = @import("../arch/aarch64/bits.zig");
+const aarch64_util = link.aarch64;
const allocPrint = std.fmt.allocPrint;
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index a1a9dedd4b..4262c329fa 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -2487,7 +2487,13 @@ fn initWipNavInner(
try wip_nav.strp(nav.fqn.toSlice(ip));
const ty: Type = nav_val.typeOf(zcu);
const addr: Loc = .{ .addr_reloc = sym_index };
- const loc: Loc = if (decl.is_threadlocal) .{ .form_tls_address = &addr } else addr;
+ const loc: Loc = if (decl.is_threadlocal) loc: {
+ const target = zcu.comp.root_mod.resolved_target.result;
+ break :loc switch (target.cpu.arch) {
+ .x86_64 => .{ .form_tls_address = &addr },
+ else => .empty,
+ };
+ } else addr;
switch (decl.kind) {
.unnamed_test, .@"test", .decltest, .@"comptime" => unreachable,
.@"const" => {
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 5966bae02c..4bb88f2af3 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -1627,7 +1627,7 @@ const aarch64 = struct {
const S_ = th.targetAddress(target_index, elf_file);
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
- aarch64_util.writeBranchImm(disp, code);
+ util.writeBranchImm(disp, code);
},
.PREL32 => {
@@ -1640,15 +1640,18 @@ const aarch64 = struct {
mem.writeInt(u64, code_buffer[r_offset..][0..8], @bitCast(value), .little);
},
+ .ADR_PREL_LO21 => {
+ const value = math.cast(i21, S + A - P) orelse return error.Overflow;
+ util.writeAdrInst(value, code);
+ },
+
.ADR_PREL_PG_HI21 => {
// TODO: check for relaxation of ADRP+ADD
- const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, S + A)));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S + A), code);
},
.ADR_GOT_PAGE => if (target.flags.has_got) {
- const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, G + GOT + A)));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, G + GOT + A), code);
} else {
// TODO: relax
var err = try diags.addErrorWithNotes(1);
@@ -1663,12 +1666,12 @@ const aarch64 = struct {
.LD64_GOT_LO12_NC => {
assert(target.flags.has_got);
const taddr = @as(u64, @intCast(G + GOT + A));
- aarch64_util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
+ util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
},
.ADD_ABS_LO12_NC => {
const taddr = @as(u64, @intCast(S + A));
- aarch64_util.writeAddImmInst(@truncate(taddr), code);
+ util.writeAddImmInst(@truncate(taddr), code);
},
.LDST8_ABS_LO12_NC,
@@ -1687,57 +1690,54 @@ const aarch64 = struct {
.LDST128_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 16),
else => unreachable,
};
- aarch64_util.writeLoadStoreRegInst(off, code);
+ util.writeLoadStoreRegInst(off, code);
},
.TLSLE_ADD_TPREL_HI12 => {
const value = math.cast(i12, (S + A - TP) >> 12) orelse
return error.Overflow;
- aarch64_util.writeAddImmInst(@bitCast(value), code);
+ util.writeAddImmInst(@bitCast(value), code);
},
.TLSLE_ADD_TPREL_LO12_NC => {
const value: i12 = @truncate(S + A - TP);
- aarch64_util.writeAddImmInst(@bitCast(value), code);
+ util.writeAddImmInst(@bitCast(value), code);
},
.TLSIE_ADR_GOTTPREL_PAGE21 => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
- aarch64_util.writeLoadStoreRegInst(off, code);
+ util.writeLoadStoreRegInst(off, code);
},
.TLSGD_ADR_PAGE21 => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSGD_ADD_LO12_NC => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
- aarch64_util.writeAddImmInst(off, code);
+ util.writeAddImmInst(off, code);
},
.TLSDESC_ADR_PAGE21 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
} else {
relocs_log.debug(" relaxing adrp => nop", .{});
- mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
+ util.encoding.Instruction.nop().write(code);
}
},
@@ -1746,10 +1746,10 @@ const aarch64 = struct {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
- aarch64_util.writeLoadStoreRegInst(off, code);
+ util.writeLoadStoreRegInst(off, code);
} else {
relocs_log.debug(" relaxing ldr => nop", .{});
- mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
+ util.encoding.Instruction.nop().write(code);
}
},
@@ -1758,32 +1758,18 @@ const aarch64 = struct {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
- aarch64_util.writeAddImmInst(off, code);
+ util.writeAddImmInst(off, code);
} else {
- const old_inst: Instruction = .{
- .add_subtract_immediate = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.add_subtract_immediate),
- ), code),
- };
- const rd: Register = @enumFromInt(old_inst.add_subtract_immediate.rd);
- relocs_log.debug(" relaxing add({s}) => movz(x0, {x})", .{ @tagName(rd), S + A - TP });
+ relocs_log.debug(" relaxing add => movz(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(math.cast(i16, (S + A - TP) >> 16) orelse return error.Overflow);
- mem.writeInt(u32, code, Instruction.movz(.x0, value, 16).toU32(), .little);
+ util.encoding.Instruction.movz(.x0, value, .{ .lsl = .@"16" }).write(code);
}
},
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
- const old_inst: Instruction = .{
- .unconditional_branch_register = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.unconditional_branch_register),
- ), code),
- };
- const rn: Register = @enumFromInt(old_inst.unconditional_branch_register.rn);
- relocs_log.debug(" relaxing br({s}) => movk(x0, {x})", .{ @tagName(rn), S + A - TP });
+ relocs_log.debug(" relaxing br => movk(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(@as(i16, @truncate(S + A - TP)));
- mem.writeInt(u32, code, Instruction.movk(.x0, value, 0).toU32(), .little);
+ util.encoding.Instruction.movk(.x0, value, .{}).write(code);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
@@ -1819,9 +1805,7 @@ const aarch64 = struct {
}
}
- const aarch64_util = @import("../aarch64.zig");
- const Instruction = aarch64_util.Instruction;
- const Register = aarch64_util.Register;
+ const util = @import("../aarch64.zig");
};
const riscv = struct {
diff --git a/src/link/Elf/Thunk.zig b/src/link/Elf/Thunk.zig
index 2af0c9c9d3..59b867be78 100644
--- a/src/link/Elf/Thunk.zig
+++ b/src/link/Elf/Thunk.zig
@@ -95,18 +95,21 @@ const aarch64 = struct {
const sym = elf_file.symbol(ref).?;
const saddr = thunk.address(elf_file) + @as(i64, @intCast(i * trampoline_size));
const taddr = sym.address(.{}, elf_file);
- const pages = try util.calcNumberOfPages(saddr, taddr);
- try writer.writeInt(u32, Instruction.adrp(.x16, pages).toU32(), .little);
- const off: u12 = @truncate(@as(u64, @bitCast(taddr)));
- try writer.writeInt(u32, Instruction.add(.x16, .x16, off, false).toU32(), .little);
- try writer.writeInt(u32, Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(
+ util.encoding.Instruction.adrp(.x16, try util.calcNumberOfPages(saddr, taddr) << 12),
+ ), .little);
+ try writer.writeInt(u32, @bitCast(util.encoding.Instruction.add(
+ .x16,
+ .x16,
+ .{ .immediate = @truncate(@as(u64, @bitCast(taddr))) },
+ )), .little);
+ try writer.writeInt(u32, @bitCast(util.encoding.Instruction.br(.x16)), .little);
}
}
const trampoline_size = 3 * @sizeOf(u32);
const util = @import("../aarch64.zig");
- const Instruction = util.Instruction;
};
const assert = std.debug.assert;
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index c8f37125ed..f49406b751 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -1270,9 +1270,13 @@ fn updateNavCode(
log.debug("updateNavCode {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
- const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
- const required_alignment = switch (pt.navAlignment(nav_index)) {
- .none => target_util.defaultFunctionAlignment(target),
+ const mod = zcu.navFileScope(nav_index).mod.?;
+ const target = &mod.resolved_target.result;
+ const required_alignment = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
diff --git a/src/link/Elf/relocation.zig b/src/link/Elf/relocation.zig
index 305dcda789..366d19d9b3 100644
--- a/src/link/Elf/relocation.zig
+++ b/src/link/Elf/relocation.zig
@@ -94,14 +94,18 @@ pub fn encode(comptime kind: Kind, cpu_arch: std.Target.Cpu.Arch) u32 {
pub const dwarf = struct {
pub fn crossSectionRelocType(format: DW.Format, cpu_arch: std.Target.Cpu.Arch) u32 {
return switch (cpu_arch) {
- .x86_64 => @intFromEnum(switch (format) {
- .@"32" => elf.R_X86_64.@"32",
+ .x86_64 => @intFromEnum(@as(elf.R_X86_64, switch (format) {
+ .@"32" => .@"32",
.@"64" => .@"64",
- }),
- .riscv64 => @intFromEnum(switch (format) {
- .@"32" => elf.R_RISCV.@"32",
+ })),
+ .aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (format) {
+ .@"32" => .ABS32,
+ .@"64" => .ABS64,
+ })),
+ .riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (format) {
+ .@"32" => .@"32",
.@"64" => .@"64",
- }),
+ })),
else => @panic("TODO unhandled cpu arch"),
};
}
@@ -121,6 +125,14 @@ pub const dwarf = struct {
},
.debug_frame => .PC32,
})),
+ .aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (source_section) {
+ else => switch (address_size) {
+ .@"32" => .ABS32,
+ .@"64" => .ABS64,
+ else => unreachable,
+ },
+ .debug_frame => .PREL32,
+ })),
.riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (source_section) {
else => switch (address_size) {
.@"32" => .@"32",
diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig
index 3f2ac7cb16..2ab71d8c49 100644
--- a/src/link/Elf/synthetic_sections.zig
+++ b/src/link/Elf/synthetic_sections.zig
@@ -810,54 +810,43 @@ pub const PltSection = struct {
const got_plt_addr: i64 = @intCast(shdrs[elf_file.section_indexes.got_plt.?].sh_addr);
// TODO: relax if possible
// .got.plt[2]
- const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
- const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(got_plt_addr + 16))), 8);
+ const pages = try util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
+ const ldr_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
const add_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
- const preamble = &[_]Instruction{
- Instruction.stp(
- .x16,
- .x30,
- Register.sp,
- Instruction.LoadStorePairOffset.pre_index(-16),
- ),
- Instruction.adrp(.x16, pages),
- Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
- Instruction.add(.x16, .x16, add_off, false),
- Instruction.br(.x17),
- Instruction.nop(),
- Instruction.nop(),
- Instruction.nop(),
+ const preamble = [_]util.encoding.Instruction{
+ .stp(.x16, .x30, .{ .pre_index = .{ .base = .sp, .index = -16 } }),
+ .adrp(.x16, pages << 12),
+ .ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = ldr_off } }),
+ .add(.x16, .x16, .{ .immediate = add_off }),
+ .br(.x17),
+ .nop(),
+ .nop(),
+ .nop(),
};
comptime assert(preamble.len == 8);
- for (preamble) |inst| {
- try writer.writeInt(u32, inst.toU32(), .little);
- }
+ for (preamble) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
for (plt.symbols.items) |ref| {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
- const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
- const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
+ const pages = try util.calcNumberOfPages(source_addr, target_addr);
+ const ldr_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
const add_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
- const insts = &[_]Instruction{
- Instruction.adrp(.x16, pages),
- Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
- Instruction.add(.x16, .x16, add_off, false),
- Instruction.br(.x17),
+ const insts = [_]util.encoding.Instruction{
+ .adrp(.x16, pages << 12),
+ .ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = ldr_off } }),
+ .add(.x16, .x16, .{ .immediate = add_off }),
+ .br(.x17),
};
comptime assert(insts.len == 4);
- for (insts) |inst| {
- try writer.writeInt(u32, inst.toU32(), .little);
- }
+ for (insts) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
}
- const aarch64_util = @import("../aarch64.zig");
- const Instruction = aarch64_util.Instruction;
- const Register = aarch64_util.Register;
+ const util = @import("../aarch64.zig");
};
};
@@ -979,24 +968,20 @@ pub const PltGotSection = struct {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotAddress(elf_file);
const source_addr = sym.pltGotAddress(elf_file);
- const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
- const off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
- const insts = &[_]Instruction{
- Instruction.adrp(.x16, pages),
- Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(off)),
- Instruction.br(.x17),
- Instruction.nop(),
+ const pages = try util.calcNumberOfPages(source_addr, target_addr);
+ const off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
+ const insts = [_]util.encoding.Instruction{
+ .adrp(.x16, pages << 12),
+ .ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = off } }),
+ .br(.x17),
+ .nop(),
};
comptime assert(insts.len == 4);
- for (insts) |inst| {
- try writer.writeInt(u32, inst.toU32(), .little);
- }
+ for (insts) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
}
- const aarch64_util = @import("../aarch64.zig");
- const Instruction = aarch64_util.Instruction;
- const Register = aarch64_util.Register;
+ const util = @import("../aarch64.zig");
};
};
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 734b4b6a04..a3845727aa 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -328,6 +328,7 @@ pub fn deinit(self: *MachO) void {
self.unwind_info.deinit(gpa);
self.data_in_code.deinit(gpa);
+ for (self.thunks.items) |*thunk| thunk.deinit(gpa);
self.thunks.deinit(gpa);
}
@@ -612,7 +613,6 @@ pub fn flush(
};
const emit = self.base.emit;
invalidateKernelCache(emit.root_dir.handle, emit.sub_path) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to invalidate kernel cache: {s}", .{@errorName(e)}),
};
}
@@ -5374,7 +5374,7 @@ const mem = std.mem;
const meta = std.meta;
const Writer = std.io.Writer;
-const aarch64 = @import("../arch/aarch64/bits.zig");
+const aarch64 = codegen.aarch64.encoding;
const bind = @import("MachO/dyld_info/bind.zig");
const calcUuid = @import("MachO/uuid.zig").calcUuid;
const codegen = @import("../codegen.zig");
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index e084ae0385..2ecd8da532 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -780,8 +780,7 @@ fn resolveRelocInner(
};
break :target math.cast(u64, target) orelse return error.Overflow;
};
- const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target))));
- aarch64.writeAdrpInst(pages, code[rel_offset..][0..4]);
+ aarch64.writeAdrInst(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target)), code[rel_offset..][0..aarch64.encoding.Instruction.size]);
},
.pageoff => {
@@ -789,26 +788,18 @@ fn resolveRelocInner(
assert(rel.meta.length == 2);
assert(!rel.meta.pcrel);
const target = math.cast(u64, S + A) orelse return error.Overflow;
- const inst_code = code[rel_offset..][0..4];
- if (aarch64.isArithmeticOp(inst_code)) {
- aarch64.writeAddImmInst(@truncate(target), inst_code);
- } else {
- var inst = aarch64.Instruction{
- .load_store_register = mem.bytesToValue(@FieldType(
- aarch64.Instruction,
- @tagName(aarch64.Instruction.load_store_register),
- ), inst_code),
- };
- inst.load_store_register.offset = switch (inst.load_store_register.size) {
- 0 => if (inst.load_store_register.v == 1)
- try divExact(self, rel, @truncate(target), 16, macho_file)
- else
- @truncate(target),
- 1 => try divExact(self, rel, @truncate(target), 2, macho_file),
- 2 => try divExact(self, rel, @truncate(target), 4, macho_file),
- 3 => try divExact(self, rel, @truncate(target), 8, macho_file),
- };
- try writer.writeInt(u32, inst.toU32(), .little);
+ const inst_code = code[rel_offset..][0..aarch64.encoding.Instruction.size];
+ var inst: aarch64.encoding.Instruction = .read(inst_code);
+ switch (inst.decode()) {
+ else => unreachable,
+ .data_processing_immediate => aarch64.writeAddImmInst(@truncate(target), inst_code),
+ .load_store => |load_store| {
+ inst.load_store.register_unsigned_immediate.group.imm12 = switch (load_store.register_unsigned_immediate.decode()) {
+ .integer => |integer| try divExact(self, rel, @truncate(target), @as(u4, 1) << @intFromEnum(integer.group.size), macho_file),
+ .vector => |vector| try divExact(self, rel, @truncate(target), @as(u5, 1) << @intFromEnum(vector.group.opc1.decode(vector.group.size)), macho_file),
+ };
+ try writer.writeInt(u32, @bitCast(inst), .little);
+ },
}
},
@@ -834,59 +825,26 @@ fn resolveRelocInner(
break :target math.cast(u64, target) orelse return error.Overflow;
};
- const RegInfo = struct {
- rd: u5,
- rn: u5,
- size: u2,
- };
-
const inst_code = code[rel_offset..][0..4];
- const reg_info: RegInfo = blk: {
- if (aarch64.isArithmeticOp(inst_code)) {
- const inst = mem.bytesToValue(@FieldType(
- aarch64.Instruction,
- @tagName(aarch64.Instruction.add_subtract_immediate),
- ), inst_code);
- break :blk .{
- .rd = inst.rd,
- .rn = inst.rn,
- .size = inst.sf,
- };
- } else {
- const inst = mem.bytesToValue(@FieldType(
- aarch64.Instruction,
- @tagName(aarch64.Instruction.load_store_register),
- ), inst_code);
- break :blk .{
- .rd = inst.rt,
- .rn = inst.rn,
- .size = inst.size,
- };
- }
- };
-
- var inst = if (sym.getSectionFlags().tlv_ptr) aarch64.Instruction{
- .load_store_register = .{
- .rt = reg_info.rd,
- .rn = reg_info.rn,
- .offset = try divExact(self, rel, @truncate(target), 8, macho_file),
- .opc = 0b01,
- .op1 = 0b01,
- .v = 0,
- .size = reg_info.size,
+ const rd, const rn = switch (aarch64.encoding.Instruction.read(inst_code).decode()) {
+ else => unreachable,
+ .data_processing_immediate => |decoded| .{
+ decoded.add_subtract_immediate.group.Rd.decodeInteger(.doubleword, .{ .sp = true }),
+ decoded.add_subtract_immediate.group.Rn.decodeInteger(.doubleword, .{ .sp = true }),
},
- } else aarch64.Instruction{
- .add_subtract_immediate = .{
- .rd = reg_info.rd,
- .rn = reg_info.rn,
- .imm12 = @truncate(target),
- .sh = 0,
- .s = 0,
- .op = 0,
- .sf = @as(u1, @truncate(reg_info.size)),
+ .load_store => |decoded| .{
+ decoded.register_unsigned_immediate.integer.group.Rt.decodeInteger(.doubleword, .{}),
+ decoded.register_unsigned_immediate.group.Rn.decodeInteger(.doubleword, .{ .sp = true }),
},
};
- try writer.writeInt(u32, inst.toU32(), .little);
+
+ try writer.writeInt(u32, @bitCast(@as(
+ aarch64.encoding.Instruction,
+ if (sym.getSectionFlags().tlv_ptr) .ldr(rd, .{ .unsigned_offset = .{
+ .base = rn,
+ .offset = try divExact(self, rel, @truncate(target), 8, macho_file) * 8,
+ } }) else .add(rd, rn, .{ .immediate = @truncate(target) }),
+ )), .little);
},
}
}
diff --git a/src/link/MachO/Thunk.zig b/src/link/MachO/Thunk.zig
index e9f67dd5e5..26432da1f5 100644
--- a/src/link/MachO/Thunk.zig
+++ b/src/link/MachO/Thunk.zig
@@ -21,15 +21,17 @@ pub fn getTargetAddress(thunk: Thunk, ref: MachO.Ref, macho_file: *MachO) u64 {
}
pub fn write(thunk: Thunk, macho_file: *MachO, writer: anytype) !void {
+ const Instruction = aarch64.encoding.Instruction;
for (thunk.symbols.keys(), 0..) |ref, i| {
const sym = ref.getSymbol(macho_file).?;
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try aarch64.calcNumberOfPages(@intCast(saddr), @intCast(taddr));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off: u12 = @truncate(taddr);
- try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(
+ Instruction.add(.x16, .x16, .{ .immediate = @truncate(taddr) }),
+ ), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
}
}
diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig
index d6a56f8411..ef82f82e72 100644
--- a/src/link/MachO/ZigObject.zig
+++ b/src/link/MachO/ZigObject.zig
@@ -945,9 +945,13 @@ fn updateNavCode(
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
- const required_alignment = switch (pt.navAlignment(nav_index)) {
- .none => target_util.defaultFunctionAlignment(target),
+ const mod = zcu.navFileScope(nav_index).mod.?;
+ const target = &mod.resolved_target.result;
+ const required_alignment = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
diff --git a/src/link/MachO/synthetic.zig b/src/link/MachO/synthetic.zig
index c91c41df5f..22c44d2f7a 100644
--- a/src/link/MachO/synthetic.zig
+++ b/src/link/MachO/synthetic.zig
@@ -105,16 +105,15 @@ pub const StubsSection = struct {
try writer.writeInt(i32, @intCast(target - source - 2 - 4), .little);
},
.aarch64 => {
+ const Instruction = aarch64.encoding.Instruction;
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(target), 8);
- try writer.writeInt(
- u32,
- aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
- .little,
- );
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.ldr(
+ .x16,
+ .{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(target)) } },
+ )), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
},
else => unreachable,
}
@@ -201,18 +200,16 @@ pub const StubsHelperSection = struct {
try writer.writeInt(i32, @intCast(target - source - 6 - 4), .little);
},
.aarch64 => {
- const literal = blk: {
- const div_res = try std.math.divExact(u64, entry_size - @sizeOf(u32), 4);
- break :blk std.math.cast(u18, div_res) orelse return error.Overflow;
- };
- try writer.writeInt(u32, aarch64.Instruction.ldrLiteral(
- .w16,
- literal,
- ).toU32(), .little);
+ const Instruction = aarch64.encoding.Instruction;
+ if (entry_size % Instruction.size != 0) return error.UnexpectedRemainder;
+ try writer.writeInt(u32, @bitCast(
+ Instruction.ldr(.w16, .{ .literal = std.math.cast(i21, entry_size - Instruction.size) orelse
+ return error.Overflow }),
+ ), .little);
const disp = math.cast(i28, @as(i64, @intCast(target)) - @as(i64, @intCast(source + 4))) orelse
return error.Overflow;
- try writer.writeInt(u32, aarch64.Instruction.b(disp).toU32(), .little);
- try writer.writeAll(&.{ 0x0, 0x0, 0x0, 0x0 });
+ try writer.writeInt(u32, @bitCast(Instruction.b(disp)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.udf(0x0)), .little);
},
else => unreachable,
}
@@ -242,31 +239,28 @@ pub const StubsHelperSection = struct {
try writer.writeByte(0x90);
},
.aarch64 => {
+ const Instruction = aarch64.encoding.Instruction;
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr), @intCast(dyld_private_addr));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
- const off: u12 = @truncate(dyld_private_addr);
- try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.adrp(.x17, pages << 12)), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(
+ Instruction.add(.x17, .x17, .{ .immediate = @as(u12, @truncate(dyld_private_addr)) }),
+ ), .little);
}
- try writer.writeInt(u32, aarch64.Instruction.stp(
- .x16,
- .x17,
- aarch64.Register.sp,
- aarch64.Instruction.LoadStorePairOffset.pre_index(-16),
- ).toU32(), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(
+ Instruction.stp(.x16, .x17, .{ .pre_index = .{ .base = .sp, .index = -16 } }),
+ ), .little);
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr + 12), @intCast(dyld_stub_binder_addr));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(dyld_stub_binder_addr), 8);
- try writer.writeInt(u32, aarch64.Instruction.ldr(
- .x16,
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.ldr(
.x16,
- aarch64.Instruction.LoadStoreOffset.imm(off),
- ).toU32(), .little);
+ .{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(dyld_stub_binder_addr)) } },
+ )), .little);
}
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.br(.x16)), .little);
},
else => unreachable,
}
@@ -426,35 +420,32 @@ pub const ObjcStubsSection = struct {
}
},
.aarch64 => {
+ const Instruction = aarch64.encoding.Instruction;
{
const target = sym.getObjcSelrefsAddress(macho_file);
const source = addr;
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(target), 8);
- try writer.writeInt(
- u32,
- aarch64.Instruction.ldr(.x1, .x1, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
- .little,
- );
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x1, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.ldr(
+ .x1,
+ .{ .unsigned_offset = .{ .base = .x1, .offset = @as(u12, @truncate(target)) } },
+ )), .little);
}
{
const target_sym = obj.getObjcMsgSendRef(macho_file).?.getSymbol(macho_file).?;
const target = target_sym.getGotAddress(macho_file);
const source = addr + 2 * @sizeOf(u32);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(target), 8);
- try writer.writeInt(
- u32,
- aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
- .little,
- );
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.ldr(
+ .x16,
+ .{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(target)) } },
+ )), .little);
}
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
},
else => unreachable,
}
diff --git a/src/link/aarch64.zig b/src/link/aarch64.zig
index d86939a156..c9defc27b3 100644
--- a/src/link/aarch64.zig
+++ b/src/link/aarch64.zig
@@ -1,66 +1,36 @@
-pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @as(u5, @truncate(inst[3]));
- return ((group_decode >> 2) == 4);
-}
+pub const encoding = @import("../codegen.zig").aarch64.encoding;
pub fn writeAddImmInst(value: u12, code: *[4]u8) void {
- var inst = Instruction{
- .add_subtract_immediate = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.add_subtract_immediate),
- ), code),
- };
- inst.add_subtract_immediate.imm12 = value;
- mem.writeInt(u32, code, inst.toU32(), .little);
+ var inst: encoding.Instruction = .read(code);
+ inst.data_processing_immediate.add_subtract_immediate.group.imm12 = value;
+ inst.write(code);
}
pub fn writeLoadStoreRegInst(value: u12, code: *[4]u8) void {
- var inst: Instruction = .{
- .load_store_register = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.load_store_register),
- ), code),
- };
- inst.load_store_register.offset = value;
- mem.writeInt(u32, code, inst.toU32(), .little);
+ var inst: encoding.Instruction = .read(code);
+ inst.load_store.register_unsigned_immediate.group.imm12 = value;
+ inst.write(code);
}
-pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 {
- const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
- const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
- const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
- return pages;
+pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i33 {
+ return math.cast(i21, (taddr >> 12) - (saddr >> 12)) orelse error.Overflow;
}
-pub fn writeAdrpInst(pages: u21, code: *[4]u8) void {
- var inst = Instruction{
- .pc_relative_address = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.pc_relative_address),
- ), code),
- };
- inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
- inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
- mem.writeInt(u32, code, inst.toU32(), .little);
+pub fn writeAdrInst(imm: i33, code: *[4]u8) void {
+ var inst: encoding.Instruction = .read(code);
+ inst.data_processing_immediate.pc_relative_addressing.group.immhi = @intCast(imm >> 2);
+ inst.data_processing_immediate.pc_relative_addressing.group.immlo = @bitCast(@as(i2, @truncate(imm)));
+ inst.write(code);
}
pub fn writeBranchImm(disp: i28, code: *[4]u8) void {
- var inst = Instruction{
- .unconditional_branch_immediate = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.unconditional_branch_immediate),
- ), code),
- };
- inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2))));
- mem.writeInt(u32, code, inst.toU32(), .little);
+ var inst: encoding.Instruction = .read(code);
+ inst.branch_exception_generating_system.unconditional_branch_immediate.group.imm26 = @intCast(@shrExact(disp, 2));
+ inst.write(code);
}
const assert = std.debug.assert;
-const bits = @import("../arch/aarch64/bits.zig");
const builtin = @import("builtin");
const math = std.math;
const mem = std.mem;
const std = @import("std");
-
-pub const Instruction = bits.Instruction;
-pub const Register = bits.Register;
diff --git a/src/main.zig b/src/main.zig
index 7ad40e1a68..a0a40ae093 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -37,6 +37,7 @@ const dev = @import("dev.zig");
test {
_ = Package;
+ _ = @import("codegen.zig");
}
const thread_stack_size = 60 << 20;
@@ -4624,7 +4625,9 @@ fn cmdTranslateC(
fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{ path, fs.path.sep_str, out_zig_path, @errorName(err) });
};
defer zig_file.close();
- try fs.File.stdout().writeFileAll(zig_file, .{});
+ var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var file_reader = zig_file.reader(&.{});
+ _ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
return cleanExit();
}
}
@@ -4645,14 +4648,14 @@ const usage_init =
fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
dev.check(.init_command);
- var strip = false;
+ var template: enum { example, minimal } = .example;
{
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
- if (mem.eql(u8, arg, "-s") or mem.eql(u8, arg, "--strip")) {
- strip = true;
+ if (mem.eql(u8, arg, "-m") or mem.eql(u8, arg, "--minimal")) {
+ template = .minimal;
} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
try fs.File.stdout().writeAll(usage_init);
return cleanExit();
@@ -4665,40 +4668,79 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
}
}
- var templates = findTemplates(gpa, arena, strip);
- defer templates.deinit();
-
const cwd_path = try introspect.getResolvedCwd(arena);
const cwd_basename = fs.path.basename(cwd_path);
const sanitized_root_name = try sanitizeExampleName(arena, cwd_basename);
- const s = fs.path.sep_str;
- const template_paths = [_][]const u8{
- Package.build_zig_basename,
- Package.Manifest.basename,
- "src" ++ s ++ "main.zig",
- "src" ++ s ++ "root.zig",
- };
- var ok_count: usize = 0;
-
const fingerprint: Package.Fingerprint = .generate(sanitized_root_name);
- for (template_paths) |template_path| {
- if (templates.write(arena, fs.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
- std.log.info("created {s}", .{template_path});
- ok_count += 1;
- } else |err| switch (err) {
- error.PathAlreadyExists => std.log.info("preserving already existing file: {s}", .{
- template_path,
- }),
- else => std.log.err("unable to write {s}: {s}\n", .{ template_path, @errorName(err) }),
- }
- }
+ switch (template) {
+ .example => {
+ var templates = findTemplates(gpa, arena);
+ defer templates.deinit();
+
+ const s = fs.path.sep_str;
+ const template_paths = [_][]const u8{
+ Package.build_zig_basename,
+ Package.Manifest.basename,
+ "src" ++ s ++ "main.zig",
+ "src" ++ s ++ "root.zig",
+ };
+ var ok_count: usize = 0;
+
+ for (template_paths) |template_path| {
+ if (templates.write(arena, fs.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
+ std.log.info("created {s}", .{template_path});
+ ok_count += 1;
+ } else |err| switch (err) {
+ error.PathAlreadyExists => std.log.info("preserving already existing file: {s}", .{
+ template_path,
+ }),
+ else => std.log.err("unable to write {s}: {s}\n", .{ template_path, @errorName(err) }),
+ }
+ }
- if (ok_count == template_paths.len) {
- std.log.info("see `zig build --help` for a menu of options", .{});
+ if (ok_count == template_paths.len) {
+ std.log.info("see `zig build --help` for a menu of options", .{});
+ }
+ return cleanExit();
+ },
+ .minimal => {
+ writeSimpleTemplateFile(Package.Manifest.basename,
+ \\.{{
+ \\ .name = .{s},
+ \\ .version = "{s}",
+ \\ .paths = .{{""}},
+ \\ .fingerprint = 0x{x},
+ \\}}
+ \\
+ , .{
+ sanitized_root_name,
+ build_options.version,
+ fingerprint.int(),
+ }) catch |err| switch (err) {
+ else => fatal("failed to create '{s}': {s}", .{ Package.Manifest.basename, @errorName(err) }),
+ error.PathAlreadyExists => fatal("refusing to overwrite '{s}'", .{Package.Manifest.basename}),
+ };
+ writeSimpleTemplateFile(Package.build_zig_basename,
+ \\const std = @import("std");
+ \\pub fn build(b: *std.Build) void {{
+ \\ _ = b; // stub
+ \\}}
+ \\
+ , .{}) catch |err| switch (err) {
+ else => fatal("failed to create '{s}': {s}", .{ Package.build_zig_basename, @errorName(err) }),
+ // `build.zig` already existing is okay: the user has just used `zig init` to set up
+ // their `build.zig.zon` *after* writing their `build.zig`. So this one isn't fatal.
+ error.PathAlreadyExists => {
+ std.log.info("successfully populated '{s}', preserving existing '{s}'", .{ Package.Manifest.basename, Package.build_zig_basename });
+ return cleanExit();
+ },
+ };
+ std.log.info("successfully populated '{s}' and '{s}'", .{ Package.Manifest.basename, Package.build_zig_basename });
+ return cleanExit();
+ },
}
- return cleanExit();
}
fn sanitizeExampleName(arena: Allocator, bytes: []const u8) error{OutOfMemory}![]const u8 {
@@ -7226,13 +7268,20 @@ fn loadManifest(
0,
) catch |err| switch (err) {
error.FileNotFound => {
- const fingerprint: Package.Fingerprint = .generate(options.root_name);
- var templates = findTemplates(gpa, arena, true);
- defer templates.deinit();
- templates.write(arena, options.dir, options.root_name, Package.Manifest.basename, fingerprint) catch |e| {
- fatal("unable to write {s}: {s}", .{
- Package.Manifest.basename, @errorName(e),
- });
+ writeSimpleTemplateFile(Package.Manifest.basename,
+ \\.{{
+ \\ .name = .{s},
+ \\ .version = "{s}",
+ \\ .paths = .{{""}},
+ \\ .fingerprint = 0x{x},
+ \\}}
+ \\
+ , .{
+ options.root_name,
+ build_options.version,
+ Package.Fingerprint.generate(options.root_name).int(),
+ }) catch |e| {
+ fatal("unable to write {s}: {s}", .{ Package.Manifest.basename, @errorName(e) });
};
continue;
},
@@ -7273,7 +7322,6 @@ const Templates = struct {
zig_lib_directory: Cache.Directory,
dir: fs.Dir,
buffer: std.ArrayList(u8),
- strip: bool,
fn deinit(templates: *Templates) void {
templates.zig_lib_directory.handle.close();
@@ -7302,23 +7350,9 @@ const Templates = struct {
};
templates.buffer.clearRetainingCapacity();
try templates.buffer.ensureUnusedCapacity(contents.len);
- var new_line = templates.strip;
var i: usize = 0;
while (i < contents.len) {
- if (new_line) {
- const trimmed = std.mem.trimLeft(u8, contents[i..], " ");
- if (std.mem.startsWith(u8, trimmed, "//")) {
- i += std.mem.indexOfScalar(u8, contents[i..], '\n') orelse break;
- i += 1;
- continue;
- } else {
- new_line = false;
- }
- }
-
- if (templates.strip and contents[i] == '\n') {
- new_line = true;
- } else if (contents[i] == '_' or contents[i] == '.') {
+ if (contents[i] == '_' or contents[i] == '.') {
// Both '_' and '.' are allowed because depending on the context
// one prefix will be valid, while the other might not.
if (std.mem.startsWith(u8, contents[i + 1 ..], "NAME")) {
@@ -7347,8 +7381,16 @@ const Templates = struct {
});
}
};
+fn writeSimpleTemplateFile(file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
+ const f = try fs.cwd().createFile(file_name, .{ .exclusive = true });
+ defer f.close();
+ var buf: [4096]u8 = undefined;
+ var fw = f.writer(&buf);
+ try fw.interface.print(fmt, args);
+ try fw.interface.flush();
+}
-fn findTemplates(gpa: Allocator, arena: Allocator, strip: bool) Templates {
+fn findTemplates(gpa: Allocator, arena: Allocator) Templates {
const cwd_path = introspect.getResolvedCwd(arena) catch |err| {
fatal("unable to get cwd: {s}", .{@errorName(err)});
};
@@ -7372,7 +7414,6 @@ fn findTemplates(gpa: Allocator, arena: Allocator, strip: bool) Templates {
.zig_lib_directory = zig_lib_directory,
.dir = template_dir,
.buffer = std.ArrayList(u8).init(gpa),
- .strip = strip,
};
}
diff --git a/src/target.zig b/src/target.zig
index dcacc65fad..ad83414c23 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -248,9 +248,13 @@ pub fn selfHostedBackendIsAsRobustAsLlvm(target: *const std.Target) bool {
return false;
}
-pub fn supportsStackProbing(target: *const std.Target) bool {
- return target.os.tag != .windows and target.os.tag != .uefi and
- (target.cpu.arch == .x86 or target.cpu.arch == .x86_64);
+pub fn supportsStackProbing(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
+ return switch (backend) {
+ .stage2_aarch64, .stage2_x86_64 => true,
+ .stage2_llvm => target.os.tag != .windows and target.os.tag != .uefi and
+ (target.cpu.arch == .x86 or target.cpu.arch == .x86_64),
+ else => false,
+ };
}
pub fn supportsStackProtector(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
@@ -359,6 +363,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
else => {},
}
return switch (zigBackend(target, use_llvm)) {
+ .stage2_aarch64 => true,
.stage2_llvm => true,
.stage2_x86_64 => switch (target.ofmt) {
.elf, .macho => true,
@@ -368,13 +373,22 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
};
}
-pub fn canBuildLibUbsanRt(target: *const std.Target) bool {
+pub fn canBuildLibUbsanRt(target: *const std.Target, use_llvm: bool, have_llvm: bool) bool {
switch (target.cpu.arch) {
.spirv32, .spirv64 => return false,
// Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
.nvptx, .nvptx64 => return false,
- else => return true,
+ else => {},
}
+ return switch (zigBackend(target, use_llvm)) {
+ .stage2_llvm => true,
+ .stage2_wasm => false,
+ .stage2_x86_64 => switch (target.ofmt) {
+ .elf, .macho => true,
+ else => have_llvm,
+ },
+ else => have_llvm,
+ };
}
pub fn hasRedZone(target: *const std.Target) bool {
@@ -405,6 +419,8 @@ pub fn libcFullLinkFlags(target: *const std.Target) []const []const u8 {
.android, .androideabi, .ohos, .ohoseabi => &.{ "-lm", "-lc", "-ldl" },
else => &.{ "-lm", "-lpthread", "-lc", "-ldl", "-lrt", "-lutil" },
},
+ // On SerenityOS libc includes libm, libpthread, libdl, and libssp.
+ .serenity => &.{"-lc"},
else => &.{},
};
return result;
@@ -767,6 +783,7 @@ pub fn supportsTailCall(target: *const std.Target, backend: std.builtin.Compiler
pub fn supportsThreads(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
return switch (backend) {
+ .stage2_aarch64 => false,
.stage2_powerpc => true,
.stage2_x86_64 => target.ofmt == .macho or target.ofmt == .elf,
else => true,
@@ -844,6 +861,7 @@ pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.Compile
pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, comptime feature: Feature) bool {
return switch (feature) {
.panic_fn => switch (backend) {
+ .stage2_aarch64,
.stage2_c,
.stage2_llvm,
.stage2_x86_64,
@@ -864,7 +882,7 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
else => false,
},
.field_reordering => switch (backend) {
- .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
+ .stage2_aarch64, .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
else => false,
},
.separate_thread => switch (backend) {
diff --git a/stage1/wasm2c.c b/stage1/wasm2c.c
index 425cc682b8..adbf8667e5 100644
--- a/stage1/wasm2c.c
+++ b/stage1/wasm2c.c
@@ -316,10 +316,10 @@ int main(int argc, char **argv) {
"}\n"
"\n"
"static uint32_t memory_grow(uint8_t **m, uint32_t *p, uint32_t *c, uint32_t n) {\n"
- " uint8_t *new_m = *m;\n"
" uint32_t r = *p;\n"
" uint32_t new_p = r + n;\n"
" if (new_p > UINT32_C(0xFFFF)) return UINT32_C(0xFFFFFFFF);\n"
+ " uint8_t *new_m = *m;\n"
" uint32_t new_c = *c;\n"
" if (new_c < new_p) {\n"
" do new_c += new_c / 2 + 8; while (new_c < new_p);\n"
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index b6d4e8ba00..0cc1a56393 100644
--- a/stage1/zig1.wasm
+++ b/stage1/zig1.wasm
Binary files differ
diff --git a/test/behavior.zig b/test/behavior.zig
index 414ce2e00a..590dfa8137 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -123,7 +123,6 @@ test {
}
if (builtin.zig_backend != .stage2_arm and
- builtin.zig_backend != .stage2_aarch64 and
builtin.zig_backend != .stage2_spirv)
{
_ = @import("behavior/export_keyword.zig");
@@ -141,7 +140,8 @@ test {
}
// This bug only repros in the root file
-test "deference @embedFile() of a file full of zero bytes" {
+test "dereference @embedFile() of a file full of zero bytes" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const contents = @embedFile("behavior/zero.bin").*;
diff --git a/test/behavior/abs.zig b/test/behavior/abs.zig
index 530802a0ca..078362cf3e 100644
--- a/test/behavior/abs.zig
+++ b/test/behavior/abs.zig
@@ -3,7 +3,6 @@ const std = @import("std");
const expect = std.testing.expect;
test "@abs integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -50,7 +49,6 @@ fn testAbsIntegers() !void {
}
test "@abs unsigned integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -90,7 +88,6 @@ fn testAbsUnsignedIntegers() !void {
}
test "@abs big int <= 128 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -153,7 +150,6 @@ fn testAbsUnsignedBigInt() !void {
}
test "@abs floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -207,9 +203,9 @@ fn testAbsFloats(comptime T: type) !void {
}
test "@abs int vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -275,8 +271,8 @@ fn testAbsIntVectors(comptime len: comptime_int) !void {
}
test "@abs unsigned int vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -334,8 +330,8 @@ fn testAbsUnsignedIntVectors(comptime len: comptime_int) !void {
}
test "@abs float vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index b3750c9d3d..df90207267 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -16,7 +16,6 @@ test "global variable alignment" {
}
test "large alignment of local constant" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@@ -25,7 +24,6 @@ test "large alignment of local constant" {
}
test "slicing array of length 1 can not assume runtime index is always zero" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@@ -74,7 +72,6 @@ test "alignment of struct with pointer has same alignment as usize" {
test "alignment and size of structs with 128-bit fields" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const A = struct {
@@ -160,7 +157,6 @@ test "alignment and size of structs with 128-bit fields" {
}
test "implicitly decreasing slice alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -173,7 +169,6 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
}
test "specifying alignment allows pointer cast" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -186,7 +181,6 @@ fn testBytesAlign(b: u8) !void {
}
test "@alignCast slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -205,7 +199,6 @@ fn sliceExpects4(slice: []align(4) u32) void {
test "return error union with 128-bit integer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -218,7 +211,6 @@ fn give() anyerror!u128 {
test "page aligned array on stack" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -238,7 +230,6 @@ test "page aligned array on stack" {
}
test "function alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -268,7 +259,6 @@ test "function alignment" {
}
test "implicitly decreasing fn alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -292,7 +282,6 @@ fn alignedBig() align(16) i32 {
}
test "@alignCast functions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -376,7 +365,6 @@ const DefaultAligned = struct {
test "read 128-bit field from default aligned struct in stack memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -396,7 +384,6 @@ var default_aligned_global = DefaultAligned{
test "read 128-bit field from default aligned struct in global memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -405,8 +392,8 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -426,7 +413,6 @@ test "struct field explicit alignment" {
}
test "align(N) on functions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -455,7 +441,6 @@ test "comptime alloc alignment" {
// TODO: it's impossible to test this in Zig today, since comptime vars do not have runtime addresses.
if (true) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@@ -468,7 +453,6 @@ test "comptime alloc alignment" {
}
test "@alignCast null" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -484,7 +468,6 @@ test "alignment of slice element" {
}
test "sub-aligned pointer field access" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -538,7 +521,6 @@ test "alignment of zero-bit types is respected" {
test "zero-bit fields in extern struct pad fields appropriately" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index 90fbd326ed..76dcc8075d 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -19,7 +19,6 @@ test "array to slice" {
}
test "arrays" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -47,7 +46,6 @@ fn getArrayLen(a: []const u32) usize {
}
test "array concat with undefined" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -73,7 +71,6 @@ test "array concat with undefined" {
test "array concat with tuple" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array: [2]u8 = .{ 1, 2 };
@@ -89,7 +86,6 @@ test "array concat with tuple" {
test "array init with concat" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const a = 'a';
var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' };
@@ -98,7 +94,6 @@ test "array init with concat" {
test "array init with mult" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = 'a';
@@ -110,7 +105,6 @@ test "array init with mult" {
}
test "array literal with explicit type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 };
@@ -138,7 +132,6 @@ const ArrayDotLenConstExpr = struct {
const some_array = [_]u8{ 0, 1, 2, 3 };
test "array literal with specified size" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -162,7 +155,6 @@ test "array len field" {
test "array with sentinels" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -200,7 +192,6 @@ test "void arrays" {
test "nested arrays of strings" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -215,7 +206,6 @@ test "nested arrays of strings" {
}
test "nested arrays of integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array_of_numbers = [_][2]u8{
@@ -230,7 +220,6 @@ test "nested arrays of integers" {
}
test "implicit comptime in array type size" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var arr: [plusOne(10)]bool = undefined;
@@ -243,7 +232,6 @@ fn plusOne(x: u32) u32 {
}
test "single-item pointer to array indexing and slicing" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -285,7 +273,6 @@ test "implicit cast zero sized array ptr to slice" {
}
test "anonymous list literal syntax" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -308,7 +295,6 @@ const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var s = Str{ .a = s_array[0..] };
@@ -323,7 +309,6 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -345,7 +330,6 @@ test "read/write through global variable array of struct fields initialized via
test "implicit cast single-item pointer" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testImplicitCastSingleItemPtr();
@@ -364,7 +348,6 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const arr = [_]u8{ 1, 2 };
@@ -376,7 +359,6 @@ test "comptime evaluating function that takes array by value" {
test "runtime initialize array elem and then implicit cast to slice" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var two: i32 = 2;
@@ -387,7 +369,6 @@ test "runtime initialize array elem and then implicit cast to slice" {
test "array literal as argument to function" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -414,8 +395,8 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -476,7 +457,6 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -502,7 +482,6 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -520,7 +499,6 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -540,7 +518,6 @@ test "type deduction for array subscript expression" {
test "sentinel element count towards the ABI size calculation" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -564,7 +541,7 @@ test "sentinel element count towards the ABI size calculation" {
}
test "zero-sized array with recursive type definition" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -587,8 +564,8 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -628,7 +605,6 @@ test "array with comptime-only element type" {
}
test "tuple to array handles sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -641,7 +617,6 @@ test "tuple to array handles sentinel" {
test "array init of container level array variable" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -675,8 +650,8 @@ test "runtime initialized sentinel-terminated array literal" {
}
test "array of array agregate init" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = [1]u32{11} ** 10;
@@ -725,7 +700,6 @@ test "array init with no result location has result type" {
}
test "slicing array of zero-sized values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -890,7 +864,6 @@ test "tuple initialized through reference to anonymous array init provides resul
test "copied array element doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: [10][10]u32 = undefined;
@@ -945,7 +918,6 @@ test "array initialized with array with sentinel" {
}
test "store array of array of structs at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -970,7 +942,6 @@ test "store array of array of structs at comptime" {
}
test "accessing multidimensional global array at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -986,8 +957,8 @@ test "accessing multidimensional global array at comptime" {
}
test "union that needs padding bytes inside an array" {
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1023,7 +994,6 @@ test "runtime index of array of zero-bit values" {
}
test "@splat array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1046,7 +1016,6 @@ test "@splat array" {
test "@splat array with sentinel" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1070,7 +1039,6 @@ test "@splat array with sentinel" {
test "@splat zero-length array" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/asm.zig b/test/behavior/asm.zig
index d79fca930a..dae2f579c0 100644
--- a/test/behavior/asm.zig
+++ b/test/behavior/asm.zig
@@ -7,7 +7,6 @@ const is_x86_64_linux = builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux
comptime {
if (builtin.zig_backend != .stage2_arm and
- builtin.zig_backend != .stage2_aarch64 and
!(builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) and // MSVC doesn't support inline assembly
is_x86_64_linux)
{
@@ -30,7 +29,6 @@ test "module level assembly" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
@@ -41,9 +39,9 @@ test "module level assembly" {
}
test "output constraint modifiers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -63,9 +61,9 @@ test "output constraint modifiers" {
}
test "alternative constraints" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -83,7 +81,6 @@ test "alternative constraints" {
test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -127,7 +124,6 @@ test "sized integer/float in asm input" {
test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -167,7 +163,6 @@ test "rw constraint (x86_64)" {
test "asm modifiers (AArch64)" {
if (!builtin.target.cpu.arch.isAARCH64()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig
index 54850c47eb..cda8b5f033 100644
--- a/test/behavior/atomics.zig
+++ b/test/behavior/atomics.zig
@@ -12,7 +12,7 @@ const supports_128_bit_atomics = switch (builtin.cpu.arch) {
};
test "cmpxchg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -39,7 +39,7 @@ fn testCmpxchg() !void {
}
test "atomicrmw and atomicload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -68,7 +68,7 @@ fn testAtomicLoad(ptr: *u8) !void {
}
test "cmpxchg with ptr" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -94,7 +94,7 @@ test "cmpxchg with ptr" {
}
test "cmpxchg with ignored result" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -110,8 +110,8 @@ test "128-bit cmpxchg" {
// TODO: this must appear first
if (!supports_128_bit_atomics) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -139,7 +139,7 @@ fn test_u128_cmpxchg() !void {
var a_global_variable = @as(u32, 1234);
test "cmpxchg on a global variable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -149,7 +149,7 @@ test "cmpxchg on a global variable" {
}
test "atomic load and rmw with enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -167,7 +167,7 @@ test "atomic load and rmw with enum" {
}
test "atomic store" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -185,7 +185,7 @@ fn testAtomicStore() !void {
}
test "atomicrmw with floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -211,7 +211,7 @@ fn testAtomicRmwFloat() !void {
}
test "atomicrmw with ints" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -290,7 +290,7 @@ test "atomicrmw with 128-bit ints" {
// TODO: this must appear first
if (!supports_128_bit_atomics) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testAtomicRmwInt128(.signed);
try testAtomicRmwInt128(.unsigned);
@@ -359,7 +359,7 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
}
test "atomics with different types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -409,7 +409,6 @@ fn testAtomicsWithPackedStruct(comptime T: type, a: T, b: T) !void {
}
test "return @atomicStore, using it as a void value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index bcd72d5176..c98caed91f 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -39,7 +39,6 @@ test "truncate to non-power-of-two integers" {
}
test "truncate to non-power-of-two integers from 128-bit" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -422,7 +421,6 @@ fn copy(src: *const u64, dst: *u64) void {
}
test "call result of if else expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -448,7 +446,6 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
}
test "take address of parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -474,7 +471,6 @@ fn testPointerToVoidReturnType2() *const void {
}
test "array 2D const double ptr" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -487,7 +483,6 @@ test "array 2D const double ptr" {
}
test "array 2D const double ptr with offset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -500,7 +495,6 @@ test "array 2D const double ptr with offset" {
}
test "array 3D const double ptr with offset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -536,7 +530,6 @@ fn nine() u8 {
}
test "struct inside function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testStructInFn();
@@ -588,7 +581,6 @@ test "global variable assignment with optional unwrapping with var initialized t
var global_foo: *i32 = undefined;
test "peer result location with typed parent, runtime condition, comptime prongs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -719,7 +711,6 @@ test "global constant is loaded with a runtime-known index" {
}
test "multiline string literal is null terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const s1 =
@@ -732,7 +723,6 @@ test "multiline string literal is null terminated" {
}
test "string escapes" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -764,7 +754,6 @@ fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
}
test "string concatenation" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -787,7 +776,6 @@ test "string concatenation" {
}
test "result location is optional inside error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -803,7 +791,6 @@ fn maybe(x: bool) anyerror!?u32 {
}
test "auto created variables have correct alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -821,7 +808,6 @@ test "auto created variables have correct alignment" {
test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -866,7 +852,6 @@ test "if expression type coercion" {
}
test "discarding the result of various expressions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -908,7 +893,6 @@ test "labeled block implicitly ends in a break" {
}
test "catch in block has correct result location" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -964,7 +948,6 @@ test "vector initialized with array init syntax has proper type" {
}
test "weird array and tuple initializations" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1010,7 +993,6 @@ test "generic function uses return type of other generic function" {
// https://github.com/ziglang/zig/issues/12208
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn call(
@@ -1128,7 +1110,6 @@ test "returning an opaque type from a function" {
}
test "orelse coercion as function argument" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Loc = struct { start: i32 = -1 };
@@ -1378,7 +1359,6 @@ test "copy array of self-referential struct" {
test "break out of block based on comptime known values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1412,8 +1392,8 @@ test "break out of block based on comptime known values" {
}
test "allocation and looping over 3-byte integer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig
index 33742f21c4..05b4444708 100644
--- a/test/behavior/bit_shifting.zig
+++ b/test/behavior/bit_shifting.zig
@@ -112,7 +112,7 @@ test "comptime shift safety check" {
}
test "Saturating Shift Left where lhs is of a computed type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -161,6 +161,7 @@ comptime {
}
test "Saturating Shift Left" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 590c4c28fb..15a56f9baa 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -20,7 +20,6 @@ test "@bitCast iX -> uX (32, 64)" {
}
test "@bitCast iX -> uX (8, 16, 128)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -35,8 +34,8 @@ test "@bitCast iX -> uX (8, 16, 128)" {
}
test "@bitCast iX -> uX exotic integers" {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -80,8 +79,8 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
}
test "bitcast uX to bytes" {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -296,9 +295,9 @@ test "triple level result location with bitcast sandwich passed as tuple element
}
test "@bitCast packed struct of floats" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -334,9 +333,9 @@ test "@bitCast packed struct of floats" {
}
test "comptime @bitCast packed struct to int and back" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -379,7 +378,6 @@ test "comptime bitcast with fields following f80" {
}
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -393,7 +391,7 @@ test "comptime bitcast with fields following f80" {
}
test "bitcast vector to integer and back" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -420,7 +418,6 @@ fn bitCastWrapper128(x: f128) u128 {
return @as(u128, @bitCast(x));
}
test "bitcast nan float does not modify signaling bit" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -473,7 +470,7 @@ test "bitcast nan float does not modify signaling bit" {
}
test "@bitCast of packed struct of bools all true" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -494,7 +491,7 @@ test "@bitCast of packed struct of bools all true" {
}
test "@bitCast of packed struct of bools all false" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -514,7 +511,7 @@ test "@bitCast of packed struct of bools all false" {
}
test "@bitCast of packed struct containing pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -544,7 +541,7 @@ test "@bitCast of packed struct containing pointer" {
}
test "@bitCast of extern struct containing pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig
index c6f3e28126..05096cc360 100644
--- a/test/behavior/bitreverse.zig
+++ b/test/behavior/bitreverse.zig
@@ -8,8 +8,8 @@ test "@bitReverse large exotic integer" {
}
test "@bitReverse" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -121,9 +121,9 @@ fn vector8() !void {
}
test "bitReverse vectors u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -141,9 +141,9 @@ fn vector16() !void {
}
test "bitReverse vectors u16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -161,9 +161,9 @@ fn vector24() !void {
}
test "bitReverse vectors u24" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
index b772b9d606..cd3bc58de6 100644
--- a/test/behavior/builtin_functions_returning_void_or_noreturn.zig
+++ b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
@@ -7,7 +7,6 @@ var x: u8 = 1;
// This excludes builtin functions that return void or noreturn that cannot be tested.
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index 5092a82ace..3cf7b7e015 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -3,40 +3,8 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@byteSwap integers" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- if (builtin.zig_backend == .stage2_wasm) {
- // TODO: Remove when self-hosted wasm supports more types for byteswap
- const ByteSwapIntTest = struct {
- fn run() !void {
- try t(u8, 0x12, 0x12);
- try t(u16, 0x1234, 0x3412);
- try t(u24, 0x123456, 0x563412);
- try t(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2);
- try t(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412))));
- try t(u32, 0x12345678, 0x78563412);
- try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
- try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
- try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412);
-
- try t(u0, @as(u0, 0), 0);
- try t(i8, @as(i8, -50), -50);
- try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
- try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
- try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
- try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
- }
- fn t(comptime I: type, input: I, expected_output: I) !void {
- try std.testing.expect(expected_output == @byteSwap(input));
- }
- };
- try comptime ByteSwapIntTest.run();
- try ByteSwapIntTest.run();
- return;
- }
-
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -51,23 +19,44 @@ test "@byteSwap integers" {
try t(u32, 0x12345678, 0x78563412);
try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
- try t(u40, 0x123456789a, 0x9a78563412);
- try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
- try t(u56, 0x123456789abcde, 0xdebc9a78563412);
try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412);
- try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412);
- try t(u96, 0x123456789abcdef111213141, 0x41312111f1debc9a78563412);
- try t(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
try t(u0, @as(u0, 0), 0);
try t(i8, @as(i8, -50), -50);
try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
+ try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
+ }
+ fn t(comptime I: type, input: I, expected_output: I) !void {
+ try std.testing.expect(expected_output == @byteSwap(input));
+ }
+ };
+ try comptime ByteSwapIntTest.run();
+ try ByteSwapIntTest.run();
+}
+
+test "@byteSwap exotic integers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const ByteSwapIntTest = struct {
+ fn run() !void {
+ try t(u0, 0, 0);
+ try t(u40, 0x123456789a, 0x9a78563412);
+ try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
+ try t(u56, 0x123456789abcde, 0xdebc9a78563412);
+ try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412);
+ try t(u96, 0x123456789abcdef111213141, 0x41312111f1debc9a78563412);
+ try t(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
+
try t(u40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(u40, 0x9a78563412));
try t(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
try t(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412))));
- try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
try t(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412))));
try t(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412))));
try t(
@@ -93,9 +82,9 @@ fn vector8() !void {
}
test "@byteSwap vectors u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -113,9 +102,9 @@ fn vector16() !void {
}
test "@byteSwap vectors u16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -133,9 +122,9 @@ fn vector24() !void {
}
test "@byteSwap vectors u24" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index e509fcda35..e05db0827f 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -20,8 +20,8 @@ test "super basic invocations" {
}
test "basic invocations" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -60,7 +60,6 @@ test "basic invocations" {
}
test "tuple parameters" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -95,7 +94,6 @@ test "tuple parameters" {
test "result location of function call argument through runtime condition and struct init" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E = enum { a, b };
@@ -115,6 +113,7 @@ test "result location of function call argument through runtime condition and st
}
test "function call with 40 arguments" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -270,7 +269,7 @@ test "arguments to comptime parameters generated in comptime blocks" {
}
test "forced tail call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -305,7 +304,7 @@ test "forced tail call" {
}
test "inline call preserves tail call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -342,7 +341,6 @@ test "inline call preserves tail call" {
}
test "inline call doesn't re-evaluate non generic struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -409,7 +407,6 @@ test "recursive inline call with comptime known argument" {
}
test "inline while with @call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@@ -439,7 +436,6 @@ test "method call as parameter type" {
}
test "non-anytype generic parameters provide result type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -468,7 +464,6 @@ test "non-anytype generic parameters provide result type" {
}
test "argument to generic function has correct result type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -521,7 +516,6 @@ test "call function in comptime field" {
test "call function pointer in comptime field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -573,7 +567,6 @@ test "value returned from comptime function is comptime known" {
}
test "registers get overwritten when ignoring return" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.cpu.arch != .x86_64 or builtin.os.tag != .linux) return error.SkipZigTest;
@@ -619,7 +612,6 @@ test "call with union with zero sized field is not memorized incorrectly" {
}
test "function call with cast to anyopaque pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -637,6 +629,7 @@ test "function call with cast to anyopaque pointer" {
}
test "arguments pointed to on stack into tailcall" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -708,7 +701,7 @@ test "arguments pointed to on stack into tailcall" {
}
test "tail call function pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index a0b265490d..8fcfbbd9a2 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -21,7 +21,6 @@ test "integer literal to pointer cast" {
}
test "peer type resolution: ?T and T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -100,7 +99,6 @@ test "comptime_int @floatFromInt" {
}
test "@floatFromInt" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -121,7 +119,6 @@ test "@floatFromInt" {
}
test "@floatFromInt(f80)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -157,7 +154,6 @@ test "@floatFromInt(f80)" {
}
test "@intFromFloat" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -181,7 +177,6 @@ fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void {
}
test "implicitly cast indirect pointer to maybe-indirect pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -241,7 +236,6 @@ test "@floatCast comptime_int and comptime_float" {
}
test "coerce undefined to optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -262,7 +256,6 @@ fn MakeType(comptime T: type) type {
}
test "implicit cast from *[N]T to [*c]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -299,7 +292,6 @@ test "@intCast to u0 and use the result" {
}
test "peer result null and comptime_int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -324,7 +316,6 @@ test "peer result null and comptime_int" {
}
test "*const ?[*]const T to [*c]const [*c]const T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -338,7 +329,6 @@ test "*const ?[*]const T to [*c]const [*c]const T" {
}
test "array coercion to undefined at runtime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -368,7 +358,6 @@ fn implicitIntLitToOptional() void {
}
test "return u8 coercing into ?u32 return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -390,7 +379,6 @@ test "cast from ?[*]T to ??[*]T" {
}
test "peer type unsigned int to signed" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var w: u31 = 5;
@@ -403,7 +391,6 @@ test "peer type unsigned int to signed" {
}
test "expected [*c]const u8, found [*:0]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -415,7 +402,6 @@ test "expected [*c]const u8, found [*:0]const u8" {
}
test "explicit cast from integer to error type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -431,7 +417,6 @@ fn testCastIntToErr(err: anyerror) !void {
}
test "peer resolve array and const slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -447,7 +432,6 @@ fn testPeerResolveArrayConstSlice(b: bool) !void {
}
test "implicitly cast from T to anyerror!?T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -473,7 +457,6 @@ fn castToOptionalTypeError(z: i32) !void {
}
test "implicitly cast from [0]T to anyerror![]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testCastZeroArrayToErrSliceMut();
@@ -489,7 +472,6 @@ fn gimmeErrOrSlice() anyerror![]u8 {
}
test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -522,7 +504,6 @@ fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) anyerror![]u8 {
}
test "implicit cast from *const [N]T to []const T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -548,7 +529,6 @@ fn testCastConstArrayRefToConstSlice() !void {
}
test "peer type resolution: error and [N]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -573,7 +553,6 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 {
}
test "single-item pointer of array to slice to unknown length pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -603,7 +582,6 @@ fn testCastPtrOfArrayToSliceAndPtr() !void {
}
test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -613,8 +591,8 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
}
test "@intCast on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -651,7 +629,6 @@ test "@intCast on vector" {
}
test "@floatCast cast down" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -670,7 +647,6 @@ test "@floatCast cast down" {
}
test "peer type resolution: unreachable, error set, unreachable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Error = error{
@@ -704,7 +680,6 @@ test "peer cast: error set any anyerror" {
}
test "peer type resolution: error set supersets" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -735,7 +710,6 @@ test "peer type resolution: error set supersets" {
test "peer type resolution: disjoint error sets" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -765,7 +739,6 @@ test "peer type resolution: disjoint error sets" {
test "peer type resolution: error union and error set" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -799,7 +772,6 @@ test "peer type resolution: error union and error set" {
test "peer type resolution: error union after non-error" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -833,7 +805,6 @@ test "peer type resolution: error union after non-error" {
test "peer cast *[0]T to E![]const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -849,7 +820,6 @@ test "peer cast *[0]T to E![]const T" {
test "peer cast *[0]T to []const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -872,7 +842,6 @@ test "peer cast *[N]T to [*]T" {
}
test "peer resolution of string literals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -895,7 +864,6 @@ test "peer resolution of string literals" {
}
test "peer cast [:x]T to []T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -912,7 +880,6 @@ test "peer cast [:x]T to []T" {
}
test "peer cast [N:x]T to [N]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -929,7 +896,6 @@ test "peer cast [N:x]T to [N]T" {
}
test "peer cast *[N:x]T to *[N]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -945,7 +911,6 @@ test "peer cast *[N:x]T to *[N]T" {
}
test "peer cast [*:x]T to [*]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -966,7 +931,6 @@ test "peer cast [*:x]T to [*]T" {
}
test "peer cast [:x]T to [*:x]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -988,7 +952,6 @@ test "peer cast [:x]T to [*:x]T" {
}
test "peer type resolution implicit cast to return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1009,7 +972,6 @@ test "peer type resolution implicit cast to return type" {
}
test "peer type resolution implicit cast to variable type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1035,7 +997,6 @@ test "variable initialization uses result locations properly with regards to the
}
test "cast between C pointer with different but compatible types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1053,7 +1014,6 @@ test "cast between C pointer with different but compatible types" {
}
test "peer type resolve string lit with sentinel-terminated mutable slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1104,7 +1064,6 @@ test "comptime float casts" {
}
test "pointer reinterpret const float to int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// The hex representation is 0x3fe3333333333303.
@@ -1119,7 +1078,6 @@ test "pointer reinterpret const float to int" {
}
test "implicit cast from [*]T to ?*anyopaque" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1184,7 +1142,6 @@ test "cast function with an opaque parameter" {
}
test "implicit ptr to *anyopaque" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1199,7 +1156,6 @@ test "implicit ptr to *anyopaque" {
}
test "return null from fn () anyerror!?&T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1216,7 +1172,6 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
}
test "peer type resolution: [0]u8 and []const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1237,7 +1192,6 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1251,7 +1205,6 @@ fn castToOptionalSlice() ?[]const u8 {
}
test "cast u128 to f128 and back" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1274,7 +1227,6 @@ fn cast128Float(x: u128) f128 {
}
test "implicit cast from *[N]T to ?[*]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1291,7 +1243,6 @@ test "implicit cast from *[N]T to ?[*]T" {
}
test "implicit cast from *T to ?*anyopaque" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1306,7 +1257,6 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
}
test "implicit cast *[0]T to E![]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x = @as(anyerror![]const u8, &[0]u8{});
@@ -1330,7 +1280,6 @@ test "cast from array reference to fn: runtime fn ptr" {
}
test "*const [N]null u8 to ?[]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1367,7 +1316,6 @@ test "cast between [*c]T and ?[*:0]T on fn parameter" {
var global_struct: struct { f0: usize } = undefined;
test "assignment to optional pointer result loc" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1386,7 +1334,6 @@ test "cast between *[N]void and []void" {
}
test "peer resolve arrays of different size to const slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1400,7 +1347,6 @@ fn boolToStr(b: bool) []const u8 {
}
test "cast f16 to wider types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1421,7 +1367,6 @@ test "cast f16 to wider types" {
}
test "cast f128 to narrower types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1441,7 +1386,6 @@ test "cast f128 to narrower types" {
}
test "peer type resolution: unreachable, null, slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1460,7 +1404,6 @@ test "peer type resolution: unreachable, null, slice" {
}
test "cast i8 fn call peers to i32 result" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1482,7 +1425,6 @@ test "cast i8 fn call peers to i32 result" {
}
test "cast compatible optional types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1494,7 +1436,6 @@ test "cast compatible optional types" {
}
test "coerce undefined single-item pointer of array to error union of slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = @as([*]u8, undefined)[0..0];
@@ -1513,7 +1454,6 @@ test "pointer to empty struct literal to mutable slice" {
}
test "coerce between pointers of compatible differently-named floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and !builtin.link_libc) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1548,7 +1488,6 @@ test "peer type resolution of const and non-const pointer to array" {
}
test "intFromFloat to zero-bit int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1573,8 +1512,6 @@ test "cast typed undefined to int" {
}
// test "implicit cast from [:0]T to [*c]T" {
-// if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
// var a: [:0]const u8 = "foo";
// _ = &a;
// const b: [*c]const u8 = a;
@@ -1584,7 +1521,6 @@ test "cast typed undefined to int" {
// }
test "bitcast packed struct with u0" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = packed struct(u2) { a: u0, b: u2 };
@@ -1691,7 +1627,6 @@ test "coercion from single-item pointer to @as to slice" {
}
test "peer type resolution: const sentinel slice and mutable non-sentinel slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1721,7 +1656,6 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
}
test "peer type resolution: float and comptime-known fixed-width integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1743,7 +1677,7 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
}
test "peer type resolution: same array type with sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1766,7 +1700,6 @@ test "peer type resolution: same array type with sentinel" {
}
test "peer type resolution: array with sentinel and array without sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1789,7 +1722,7 @@ test "peer type resolution: array with sentinel and array without sentinel" {
}
test "peer type resolution: array and vector with same child type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1813,7 +1746,7 @@ test "peer type resolution: array and vector with same child type" {
}
test "peer type resolution: array with smaller child type and vector with larger child type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1837,7 +1770,7 @@ test "peer type resolution: array with smaller child type and vector with larger
}
test "peer type resolution: error union and optional of same type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1861,7 +1794,6 @@ test "peer type resolution: error union and optional of same type" {
}
test "peer type resolution: C pointer and @TypeOf(null)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1884,7 +1816,6 @@ test "peer type resolution: C pointer and @TypeOf(null)" {
}
test "peer type resolution: three-way resolution combines error set and optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1927,7 +1858,7 @@ test "peer type resolution: three-way resolution combines error set and optional
}
test "peer type resolution: vector and optional vector" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1952,7 +1883,6 @@ test "peer type resolution: vector and optional vector" {
}
test "peer type resolution: optional fixed-width int and comptime_int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1974,7 +1904,7 @@ test "peer type resolution: optional fixed-width int and comptime_int" {
}
test "peer type resolution: array and tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1998,7 +1928,7 @@ test "peer type resolution: array and tuple" {
}
test "peer type resolution: vector and tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2022,7 +1952,7 @@ test "peer type resolution: vector and tuple" {
}
test "peer type resolution: vector and array and tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2066,7 +1996,6 @@ test "peer type resolution: vector and array and tuple" {
}
test "peer type resolution: empty tuple pointer and slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2088,7 +2017,6 @@ test "peer type resolution: empty tuple pointer and slice" {
}
test "peer type resolution: tuple pointer and slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2110,7 +2038,6 @@ test "peer type resolution: tuple pointer and slice" {
}
test "peer type resolution: tuple pointer and optional slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Miscompilation on Intel's OpenCL CPU runtime.
@@ -2133,7 +2060,6 @@ test "peer type resolution: tuple pointer and optional slice" {
}
test "peer type resolution: many compatible pointers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2200,7 +2126,6 @@ test "peer type resolution: many compatible pointers" {
}
test "peer type resolution: tuples with comptime fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -2232,7 +2157,6 @@ test "peer type resolution: tuples with comptime fields" {
}
test "peer type resolution: C pointer and many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2256,7 +2180,6 @@ test "peer type resolution: C pointer and many pointer" {
}
test "peer type resolution: pointer attributes are combined correctly" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2338,7 +2261,7 @@ test "peer type resolution: pointer attributes are combined correctly" {
}
test "peer type resolution: arrays of compatible types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2356,7 +2279,6 @@ test "peer type resolution: arrays of compatible types" {
}
test "cast builtins can wrap result in optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2394,7 +2316,6 @@ test "cast builtins can wrap result in optional" {
}
test "cast builtins can wrap result in error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2432,7 +2353,6 @@ test "cast builtins can wrap result in error union" {
}
test "cast builtins can wrap result in error union and optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2471,8 +2391,8 @@ test "cast builtins can wrap result in error union and optional" {
}
test "@floatCast on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2512,8 +2432,8 @@ test "@floatCast on vector" {
}
test "@ptrFromInt on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2537,8 +2457,8 @@ test "@ptrFromInt on vector" {
}
test "@intFromPtr on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2562,8 +2482,8 @@ test "@intFromPtr on vector" {
}
test "@floatFromInt on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2582,8 +2502,8 @@ test "@floatFromInt on vector" {
}
test "@intFromFloat on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2602,8 +2522,8 @@ test "@intFromFloat on vector" {
}
test "@intFromBool on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2639,7 +2559,6 @@ test "15-bit int to float" {
}
test "@as does not corrupt values with incompatible representations" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2654,7 +2573,6 @@ test "@as does not corrupt values with incompatible representations" {
}
test "result information is preserved through many nested structures" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2679,7 +2597,7 @@ test "result information is preserved through many nested structures" {
}
test "@intCast vector of signed integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2703,7 +2621,6 @@ test "result type is preserved into comptime block" {
}
test "bitcast vector" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const u8x32 = @Vector(32, u8);
@@ -2766,6 +2683,7 @@ test "@intFromFloat boundary cases" {
}
test "@intFromFloat vector boundary cases" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig
index 11a782a1ea..30cad924fe 100644
--- a/test/behavior/cast_int.zig
+++ b/test/behavior/cast_int.zig
@@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;
test "@intCast i32 to u7" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -19,7 +18,6 @@ test "@intCast i32 to u7" {
}
test "coerce i8 to i32 and @intCast back" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -36,6 +34,7 @@ test "coerce i8 to i32 and @intCast back" {
test "coerce non byte-sized integers accross 32bits boundary" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+
{
var v: u21 = 6417;
_ = &v;
@@ -164,8 +163,9 @@ const Piece = packed struct {
}
};
+// Originally reported at https://github.com/ziglang/zig/issues/14200
test "load non byte-sized optional value" {
- // Originally reported at https://github.com/ziglang/zig/issues/14200
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -181,6 +181,7 @@ test "load non byte-sized optional value" {
}
test "load non byte-sized value in struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.cpu.arch.endian() != .little) return error.SkipZigTest; // packed struct TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index 64b494b23e..9e0c378f1b 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -66,7 +66,6 @@ fn bigToNativeEndian(comptime T: type, v: T) T {
return if (endian == .big) v else @byteSwap(v);
}
test "type pun endianness" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -360,7 +359,6 @@ test "offset field ptr by enclosing array element size" {
}
test "accessing reinterpreted memory of parent object" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = extern struct {
diff --git a/test/behavior/const_slice_child.zig b/test/behavior/const_slice_child.zig
index adce2b8ced..83ac5c16a9 100644
--- a/test/behavior/const_slice_child.zig
+++ b/test/behavior/const_slice_child.zig
@@ -7,7 +7,6 @@ const expect = testing.expect;
var argv: [*]const [*]const u8 = undefined;
test "const slice child" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/decl_literals.zig b/test/behavior/decl_literals.zig
index ce47fd870a..f96f461771 100644
--- a/test/behavior/decl_literals.zig
+++ b/test/behavior/decl_literals.zig
@@ -35,7 +35,6 @@ test "decl literal with pointer" {
test "call decl literal with optional" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig
index cb4277d7fa..72a8badc1d 100644
--- a/test/behavior/defer.zig
+++ b/test/behavior/defer.zig
@@ -32,7 +32,6 @@ test "defer and labeled break" {
}
test "errdefer does not apply to fn inside fn" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (testNestedFnErrDefer()) |_| @panic("expected error") else |e| try expect(e == error.Bad);
@@ -51,7 +50,6 @@ fn testNestedFnErrDefer() anyerror!void {
test "return variable while defer expression in scope to modify it" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -91,7 +89,6 @@ fn runSomeErrorDefers(x: bool) !bool {
}
test "mixing normal and error defers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -110,7 +107,7 @@ test "mixing normal and error defers" {
}
test "errdefer with payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -132,8 +129,8 @@ test "errdefer with payload" {
}
test "reference to errdefer payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -157,7 +154,6 @@ test "reference to errdefer payload" {
}
test "simple else prong doesn't emit an error for unreachable else prong" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index d5c18fc9f6..d719a611e6 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -25,7 +25,6 @@ fn testEnumFromIntEval(x: i32) !void {
const EnumFromIntNumber = enum { Zero, One, Two, Three, Four };
test "int to enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumFromIntEval(3);
@@ -608,7 +607,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void {
}
test "enum with specified tag values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumWithSpecifiedTagValues(MultipleChoice.C);
@@ -616,7 +614,6 @@ test "enum with specified tag values" {
}
test "non-exhaustive enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -680,7 +677,6 @@ test "empty non-exhaustive enum" {
}
test "single field non-exhaustive enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -744,7 +740,6 @@ test "cast integer literal to enum" {
}
test "enum with specified and unspecified tag values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
@@ -904,8 +899,8 @@ test "enum value allocation" {
}
test "enum literal casting to tagged union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Arch = union(enum) {
@@ -931,7 +926,6 @@ test "enum literal casting to tagged union" {
const Bar = enum { A, B, C, D };
test "enum literal casting to error union with payload enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var bar: error{B}!Bar = undefined;
@@ -941,8 +935,8 @@ test "enum literal casting to error union with payload enum" {
}
test "constant enum initialization with differing sizes" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -985,8 +979,8 @@ fn test3_2(f: Test3Foo) !void {
}
test "@tagName" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1002,8 +996,8 @@ fn testEnumTagNameBare(n: anytype) []const u8 {
const BareNumber = enum { One, Two, Three };
test "@tagName non-exhaustive enum" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1014,8 +1008,8 @@ test "@tagName non-exhaustive enum" {
const NonExhaustive = enum(u8) { A, B, _ };
test "@tagName is null-terminated" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1030,8 +1024,8 @@ test "@tagName is null-terminated" {
}
test "tag name with assigned enum values" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1046,7 +1040,6 @@ test "tag name with assigned enum values" {
}
test "@tagName on enum literals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1055,8 +1048,8 @@ test "@tagName on enum literals" {
}
test "tag name with signed enum values" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1073,8 +1066,8 @@ test "tag name with signed enum values" {
}
test "@tagName in callconv(.c) function" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1091,7 +1084,6 @@ fn testEnumTagNameCallconvC() callconv(.c) [*:0]const u8 {
test "enum literal casting to optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var bar: ?Bar = undefined;
@@ -1117,8 +1109,8 @@ const bit_field_1 = BitFieldOfEnums{
};
test "bit field access with enum fields" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1158,8 +1150,8 @@ test "enum literal in array literal" {
}
test "tag name functions are unique" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1179,7 +1171,6 @@ test "tag name functions are unique" {
}
test "size of enum with only one tag which has explicit integer tag type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E = enum(u8) { nope = 10 };
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 70edb900ff..4ce94bb43b 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -402,7 +402,6 @@ fn intLiteral(str: []const u8) !?i64 {
test "nested error union function call in optional unwrap" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -448,7 +447,6 @@ test "nested error union function call in optional unwrap" {
}
test "return function call to error set from error union function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -465,7 +463,6 @@ test "return function call to error set from error union function" {
}
test "optional error set is the same size as error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -481,7 +478,6 @@ test "optional error set is the same size as error set" {
}
test "nested catch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -530,7 +526,7 @@ test "function pointer with return type that is error union with payload which i
}
test "return result loc as peer result loc in inferred error set function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -562,7 +558,6 @@ test "return result loc as peer result loc in inferred error set function" {
test "error payload type is correctly resolved" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const MyIntWrapper = struct {
@@ -591,7 +586,6 @@ test "error union comptime caching" {
test "@errorName" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -606,7 +600,6 @@ fn gimmeItBroke() anyerror {
test "@errorName sentinel length matches slice length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -701,7 +694,6 @@ test "coerce error set to the current inferred error set" {
test "error union payload is properly aligned" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -719,7 +711,6 @@ test "error union payload is properly aligned" {
}
test "ret_ptr doesn't cause own inferred error set to be resolved" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -760,7 +751,6 @@ test "simple else prong allowed even when all errors handled" {
}
test "pointer to error union payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -794,7 +784,6 @@ const NoReturn = struct {
};
test "error union of noreturn used with if" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -809,7 +798,6 @@ test "error union of noreturn used with if" {
}
test "error union of noreturn used with try" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -821,7 +809,6 @@ test "error union of noreturn used with try" {
}
test "error union of noreturn used with catch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -833,7 +820,6 @@ test "error union of noreturn used with catch" {
}
test "alignment of wrapping an error union payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -887,7 +873,6 @@ test "catch within a function that calls no errorable functions" {
}
test "error from comptime string" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -913,7 +898,6 @@ test "field access of anyerror results in smaller error set" {
}
test "optional error union return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@@ -928,7 +912,6 @@ test "optional error union return type" {
test "optional error set return type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const E = error{ A, B };
const S = struct {
@@ -953,7 +936,6 @@ test "optional error set function parameter" {
test "returning an error union containing a type with no runtime bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ZeroByteType = struct {
@@ -969,7 +951,7 @@ test "returning an error union containing a type with no runtime bits" {
}
test "try used in recursive function with inferred error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1010,7 +992,6 @@ test "generic inline function returns inferred error set" {
}
test "function called at runtime is properly analyzed for inferred error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1065,8 +1046,8 @@ test "@errorCast from error union to error union" {
}
test "result location initialization of error union with OPV payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 12e9de1825..8f45405e94 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -18,7 +18,6 @@ fn unwrapAndAddOne(blah: ?i32) i32 {
}
const should_be_1235 = unwrapAndAddOne(1234);
test "static add one" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -71,7 +70,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 {
}
test "constant expressions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var array: [array_size]u8 = undefined;
@@ -93,7 +91,6 @@ fn letsTryToCompareBools(a: bool, b: bool) bool {
return max(bool, a, b);
}
test "inlined block and runtime block phi" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(letsTryToCompareBools(true, true));
@@ -140,7 +137,6 @@ test "pointer to type" {
}
test "a type constructed in a global expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -236,7 +232,6 @@ const vertices = [_]Vertex{
};
test "statically initialized list" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(static_point_list[0].x == 1);
@@ -342,7 +337,6 @@ fn doesAlotT(comptime T: type, value: usize) T {
}
test "@setEvalBranchQuota at same scope as generic function call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(doesAlotT(u32, 2) == 2);
@@ -394,7 +388,6 @@ test "return 0 from function that has u0 return type" {
}
test "statically initialized struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
st_init_str_foo.x += 1;
@@ -444,7 +437,6 @@ fn copyWithPartialInline(s: []u32, b: []u8) void {
test "binary math operator in partially inlined function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -462,7 +454,6 @@ test "binary math operator in partially inlined function" {
}
test "comptime shl" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -491,6 +482,7 @@ test "comptime bitwise operators" {
}
test "comptime shlWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -503,7 +495,6 @@ test "comptime shlWithOverflow" {
}
test "const ptr to variable data changes at runtime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -521,7 +512,6 @@ const foo_ref = &foo_contents;
test "runtime 128 bit integer division" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -536,7 +526,6 @@ test "runtime 128 bit integer division" {
}
test "@tagName of @typeInfo" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -545,7 +534,6 @@ test "@tagName of @typeInfo" {
}
test "static eval list init" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -578,7 +566,6 @@ test "inlined loop has array literal with elided runtime scope on first iteratio
}
test "ptr to local array argument at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -741,7 +728,6 @@ test "*align(1) u16 is the same as *align(1:0:2) u16" {
test "array concatenation of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -751,7 +737,6 @@ test "array concatenation of function calls" {
test "array multiplication of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -769,7 +754,6 @@ fn scalar(x: u32) u32 {
test "array concatenation peer resolves element types - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = [2]u3{ 1, 7 };
@@ -786,7 +770,6 @@ test "array concatenation peer resolves element types - value" {
test "array concatenation peer resolves element types - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -803,7 +786,6 @@ test "array concatenation peer resolves element types - pointer" {
test "array concatenation sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -823,7 +805,6 @@ test "array concatenation sets the sentinel - value" {
}
test "array concatenation sets the sentinel - pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -843,7 +824,6 @@ test "array concatenation sets the sentinel - pointer" {
test "array multiplication sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -862,7 +842,6 @@ test "array multiplication sets the sentinel - value" {
test "array multiplication sets the sentinel - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -889,7 +868,6 @@ test "comptime assign int to optional int" {
test "two comptime calls with array default initialized to undefined" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -976,7 +954,6 @@ test "const local with comptime init through array init" {
}
test "closure capture type of runtime-known parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -992,7 +969,6 @@ test "closure capture type of runtime-known parameter" {
}
test "closure capture type of runtime-known var" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 1234;
@@ -1035,7 +1011,6 @@ test "comptime break passing through runtime condition converted to runtime brea
}
test "comptime break to outer loop passing through runtime condition converted to runtime break" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1088,7 +1063,6 @@ test "comptime break operand passing through runtime condition converted to runt
}
test "comptime break operand passing through runtime switch converted to runtime break" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1108,7 +1082,6 @@ test "comptime break operand passing through runtime switch converted to runtime
}
test "no dependency loop for alignment of self struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1147,7 +1120,6 @@ test "no dependency loop for alignment of self struct" {
}
test "no dependency loop for alignment of self bare union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1186,7 +1158,6 @@ test "no dependency loop for alignment of self bare union" {
}
test "no dependency loop for alignment of self tagged union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1230,7 +1201,6 @@ test "equality of pointers to comptime const" {
}
test "storing an array of type in a field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1260,7 +1230,6 @@ test "storing an array of type in a field" {
}
test "pass pointer to field of comptime-only type as a runtime parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1387,7 +1356,6 @@ test "lazy sizeof union tag size in compare" {
}
test "lazy value is resolved as slice operand" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1568,7 +1536,7 @@ test "x or true is comptime-known true" {
}
test "non-optional and optional array elements concatenated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/export_builtin.zig b/test/behavior/export_builtin.zig
index c11b966eda..16fc0a7a79 100644
--- a/test/behavior/export_builtin.zig
+++ b/test/behavior/export_builtin.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "exporting enum value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
@@ -23,7 +22,6 @@ test "exporting enum value" {
test "exporting with internal linkage" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn foo() callconv(.c) void {}
@@ -36,7 +34,6 @@ test "exporting with internal linkage" {
test "exporting using namespace access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
@@ -57,7 +54,6 @@ test "exporting using namespace access" {
test "exporting comptime-known value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig
index 3c94605994..742b306059 100644
--- a/test/behavior/field_parent_ptr.zig
+++ b/test/behavior/field_parent_ptr.zig
@@ -2,6 +2,7 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "@fieldParentPtr struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1339,6 +1340,7 @@ test "@fieldParentPtr packed struct last zero-bit field" {
}
test "@fieldParentPtr tagged union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1475,6 +1477,7 @@ test "@fieldParentPtr tagged union" {
}
test "@fieldParentPtr untagged union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index ba92318067..3f664473d1 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -143,7 +143,6 @@ test "cmp f64" {
}
test "cmp f128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -154,7 +153,6 @@ test "cmp f128" {
}
test "cmp f80/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
@@ -223,6 +221,7 @@ fn testCmp(comptime T: type) !void {
}
test "vector cmp f16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@@ -236,6 +235,7 @@ test "vector cmp f16" {
}
test "vector cmp f32" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -249,6 +249,7 @@ test "vector cmp f32" {
}
test "vector cmp f64" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -261,8 +262,8 @@ test "vector cmp f64" {
}
test "vector cmp f128" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -276,6 +277,7 @@ test "vector cmp f128" {
}
test "vector cmp f80/c_longdouble" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .powerpc64le) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -324,7 +326,6 @@ fn testCmpVector(comptime T: type) !void {
test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -371,7 +372,6 @@ test "negative f128 intFromFloat at compile-time" {
test "@sqrt f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -382,7 +382,6 @@ test "@sqrt f16" {
test "@sqrt f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -394,7 +393,6 @@ test "@sqrt f32/f64" {
test "@sqrt f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -481,9 +479,9 @@ fn testSqrt(comptime T: type) !void {
}
test "@sqrt with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -503,7 +501,6 @@ fn testSqrtWithVectors() !void {
test "@sin f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -514,7 +511,6 @@ test "@sin f16" {
test "@sin f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -527,7 +523,6 @@ test "@sin f32/f64" {
test "@sin f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -554,9 +549,9 @@ fn testSin(comptime T: type) !void {
}
test "@sin with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -576,7 +571,6 @@ fn testSinWithVectors() !void {
test "@cos f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -587,7 +581,6 @@ test "@cos f16" {
test "@cos f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -600,7 +593,6 @@ test "@cos f32/f64" {
test "@cos f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -627,9 +619,9 @@ fn testCos(comptime T: type) !void {
}
test "@cos with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -649,7 +641,6 @@ fn testCosWithVectors() !void {
test "@tan f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -660,7 +651,6 @@ test "@tan f16" {
test "@tan f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -673,7 +663,6 @@ test "@tan f32/f64" {
test "@tan f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -700,9 +689,9 @@ fn testTan(comptime T: type) !void {
}
test "@tan with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -722,7 +711,6 @@ fn testTanWithVectors() !void {
test "@exp f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -733,7 +721,6 @@ test "@exp f16" {
test "@exp f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -746,7 +733,6 @@ test "@exp f32/f64" {
test "@exp f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -777,9 +763,9 @@ fn testExp(comptime T: type) !void {
}
test "@exp with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -799,7 +785,6 @@ fn testExpWithVectors() !void {
test "@exp2 f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -810,7 +795,6 @@ test "@exp2 f16" {
test "@exp2 f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -823,7 +807,6 @@ test "@exp2 f32/f64" {
test "@exp2 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -849,9 +832,9 @@ fn testExp2(comptime T: type) !void {
}
test "@exp2 with @vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -870,7 +853,6 @@ fn testExp2WithVectors() !void {
}
test "@log f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -881,7 +863,6 @@ test "@log f16" {
}
test "@log f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -894,7 +875,6 @@ test "@log f32/f64" {
}
test "@log f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -921,8 +901,8 @@ fn testLog(comptime T: type) !void {
}
test "@log with @vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -940,7 +920,6 @@ test "@log with @vectors" {
}
test "@log2 f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -951,7 +930,6 @@ test "@log2 f16" {
}
test "@log2 f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -964,7 +942,6 @@ test "@log2 f32/f64" {
}
test "@log2 f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -991,8 +968,8 @@ fn testLog2(comptime T: type) !void {
}
test "@log2 with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1016,7 +993,6 @@ fn testLog2WithVectors() !void {
}
test "@log10 f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1027,7 +1003,6 @@ test "@log10 f16" {
}
test "@log10 f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1040,7 +1015,6 @@ test "@log10 f32/f64" {
}
test "@log10 f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1067,8 +1041,8 @@ fn testLog10(comptime T: type) !void {
}
test "@log10 with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1089,7 +1063,6 @@ fn testLog10WithVectors() !void {
test "@abs f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1099,7 +1072,6 @@ test "@abs f16" {
test "@abs f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testFabs(f32);
@@ -1110,7 +1082,6 @@ test "@abs f32/f64" {
test "@abs f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1190,7 +1161,7 @@ fn testFabs(comptime T: type) !void {
}
test "@abs with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1210,7 +1181,6 @@ fn testFabsWithVectors() !void {
}
test "@floor f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1220,7 +1190,6 @@ test "@floor f16" {
}
test "@floor f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1232,7 +1201,6 @@ test "@floor f32/f64" {
}
test "@floor f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1284,7 +1252,7 @@ fn testFloor(comptime T: type) !void {
}
test "@floor with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1304,7 +1272,6 @@ fn testFloorWithVectors() !void {
}
test "@ceil f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1314,7 +1281,6 @@ test "@ceil f16" {
}
test "@ceil f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1326,7 +1292,6 @@ test "@ceil f32/f64" {
}
test "@ceil f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1378,7 +1343,7 @@ fn testCeil(comptime T: type) !void {
}
test "@ceil with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1398,7 +1363,6 @@ fn testCeilWithVectors() !void {
}
test "@trunc f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1408,7 +1372,6 @@ test "@trunc f16" {
}
test "@trunc f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1420,7 +1383,6 @@ test "@trunc f32/f64" {
}
test "@trunc f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1472,7 +1434,7 @@ fn testTrunc(comptime T: type) !void {
}
test "@trunc with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1492,7 +1454,6 @@ fn testTruncWithVectors() !void {
}
test "neg f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1508,7 +1469,6 @@ test "neg f16" {
}
test "neg f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1520,7 +1480,6 @@ test "neg f32/f64" {
}
test "neg f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1606,7 +1565,6 @@ fn testNeg(comptime T: type) !void {
}
test "eval @setFloatMode at compile-time" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = comptime fnWithFloatMode();
@@ -1629,7 +1587,6 @@ test "f128 at compile time is lossy" {
test "comptime fixed-width float zero divided by zero produces NaN" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1641,7 +1598,6 @@ test "comptime fixed-width float zero divided by zero produces NaN" {
test "comptime fixed-width float non-zero divided by zero produces signed Inf" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1686,21 +1642,18 @@ test "comptime inf >= runtime 1" {
try std.testing.expect(f >= i);
}
test "comptime isNan(nan * 1)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_one = comptime std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
}
test "runtime isNan(nan * 1)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_one = std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
}
test "comptime isNan(nan * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_zero = comptime std.math.nan(f64) * 0;
@@ -1709,7 +1662,6 @@ test "comptime isNan(nan * 0)" {
try std.testing.expect(std.math.isNan(zero_times_nan));
}
test "runtime isNan(nan * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_zero = std.math.nan(f64) * 0;
@@ -1718,7 +1670,6 @@ test "runtime isNan(nan * 0)" {
try std.testing.expect(std.math.isNan(zero_times_nan));
}
test "comptime isNan(inf * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const inf_times_zero = comptime std.math.inf(f64) * 0;
@@ -1727,7 +1678,6 @@ test "comptime isNan(inf * 0)" {
try std.testing.expect(std.math.isNan(zero_times_inf));
}
test "runtime isNan(inf * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const inf_times_zero = std.math.inf(f64) * 0;
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 6fe569a1f6..ceae020bb8 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -78,7 +78,6 @@ test "return inner function which references comptime variable of outer function
test "discard the result of a function that returns a struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -101,7 +100,6 @@ test "discard the result of a function that returns a struct" {
test "inline function call that calls optional function pointer, return pointer at callsite interacts correctly with callsite return type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -179,7 +177,6 @@ fn fComplexCallconvRet(x: u32) callconv(blk: {
test "function with complex callconv and return type expressions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(fComplexCallconvRet(3).x == 9);
@@ -255,7 +252,6 @@ test "pass by non-copying value as method, at comptime" {
test "implicit cast fn call result to optional in field result" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -283,7 +279,6 @@ test "implicit cast fn call result to optional in field result" {
test "void parameters" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try voidFun(1, void{}, 2, {});
}
@@ -306,7 +301,6 @@ fn acceptsString(foo: []u8) void {
}
test "function pointers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -345,7 +339,6 @@ fn numberLiteralArg(a: anytype) !void {
test "function call with anon list literal" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -365,7 +358,6 @@ test "function call with anon list literal" {
test "function call with anon list literal - 2D" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -426,7 +418,6 @@ test "import passed byref to function in return type" {
test "implicit cast function to function ptr" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -485,7 +476,6 @@ test "method call with optional pointer first param" {
test "using @ptrCast on function pointers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -524,8 +514,6 @@ test "function returns function returning type" {
}
test "peer type resolution of inferred error set with non-void payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const S = struct {
fn openDataFile(mode: enum { read, write }) !u32 {
return switch (mode) {
@@ -582,7 +570,6 @@ test "pass and return comptime-only types" {
test "pointer to alias behaves same as pointer to function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 3ba33c2d88..a8ed6ec79e 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual;
const mem = std.mem;
test "continue in for loop" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array = [_]i32{ 1, 2, 3, 4, 5 };
@@ -67,7 +66,6 @@ test "ignore lval with underscore (for loop)" {
test "basic for loop" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -111,7 +109,6 @@ test "basic for loop" {
test "for with null and T peer types and inferred result location type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -132,7 +129,6 @@ test "for with null and T peer types and inferred result location type" {
}
test "2 break statements and an else" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -152,7 +148,6 @@ test "2 break statements and an else" {
}
test "for loop with pointer elem var" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -180,7 +175,6 @@ fn mangleString(s: []u8) void {
}
test "for copies its payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -198,7 +192,6 @@ test "for copies its payload" {
}
test "for on slice with allowzero ptr" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -215,7 +208,6 @@ test "for on slice with allowzero ptr" {
}
test "else continue outer for" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -230,8 +222,6 @@ test "else continue outer for" {
}
test "for loop with else branch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
{
var x = [_]u32{ 1, 2 };
_ = &x;
@@ -312,7 +302,6 @@ test "1-based counter and ptr to array" {
test "slice and two counters, one is offset and one is runtime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const slice: []const u8 = "blah";
@@ -342,7 +331,6 @@ test "slice and two counters, one is offset and one is runtime" {
test "two slices, one captured by-ref" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [10]u8 = undefined;
@@ -362,7 +350,6 @@ test "two slices, one captured by-ref" {
test "raw pointer and slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [10]u8 = undefined;
@@ -382,7 +369,6 @@ test "raw pointer and slice" {
test "raw pointer and counter" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [10]u8 = undefined;
@@ -401,7 +387,6 @@ test "raw pointer and counter" {
test "inline for with slice as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const comptime_slice = "hello";
var runtime_i: usize = 3;
@@ -432,7 +417,6 @@ test "inline for with slice as the comptime-known" {
test "inline for with counter as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var runtime_slice = "hello";
@@ -464,7 +448,6 @@ test "inline for with counter as the comptime-known" {
test "inline for on tuple pointer" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct { u32, u32, u32 };
@@ -480,7 +463,6 @@ test "inline for on tuple pointer" {
test "ref counter that starts at zero" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
for ([_]usize{ 0, 1, 2 }, 0..) |i, j| {
try expectEqual(i, j);
@@ -495,7 +477,6 @@ test "ref counter that starts at zero" {
test "inferred alloc ptr of for loop" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
{
var cond = false;
@@ -516,7 +497,6 @@ test "inferred alloc ptr of for loop" {
}
test "for loop results in a bool" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try std.testing.expect(for ([1]u8{0}) |x| {
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index a76f532544..a4d5c90711 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -17,7 +17,6 @@ fn checkSize(comptime T: type) usize {
}
test "simple generic fn" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(max(i32, 3, -1) == 3);
@@ -53,7 +52,6 @@ fn sameButWithFloats(a: f64, b: f64) f64 {
test "fn with comptime args" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(gimmeTheBigOne(1234, 5678) == 5678);
@@ -63,7 +61,6 @@ test "fn with comptime args" {
test "anytype params" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(max_i32(12, 34) == 34);
@@ -87,7 +84,6 @@ fn max_f64(a: f64, b: f64) f64 {
}
test "type constructed by comptime function call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -113,7 +109,6 @@ fn SimpleList(comptime L: usize) type {
test "function with return type type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var list: List(i32) = undefined;
@@ -154,7 +149,6 @@ fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
test "generic fn with implicit cast" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -173,7 +167,6 @@ fn getFirstByte(comptime T: type, mem: []const T) u8 {
test "generic fn keeps non-generic parameter types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -249,7 +242,6 @@ test "function parameter is generic" {
}
test "generic function instantiation turns into comptime call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -300,7 +292,6 @@ test "generic function with void and comptime parameter" {
}
test "anonymous struct return type referencing comptime parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -318,7 +309,6 @@ test "anonymous struct return type referencing comptime parameter" {
test "generic function instantiation non-duplicates" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -339,7 +329,6 @@ test "generic function instantiation non-duplicates" {
test "generic instantiation of tagged union with only one field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.os.tag == .wasi) return error.SkipZigTest;
@@ -439,8 +428,6 @@ test "null sentinel pointer passed as generic argument" {
}
test "generic function passed as comptime argument" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const S = struct {
fn doMath(comptime f: fn (comptime type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void {
const result = try f(i32, a, b);
@@ -451,7 +438,6 @@ test "generic function passed as comptime argument" {
}
test "return type of generic function is function pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -464,8 +450,6 @@ test "return type of generic function is function pointer" {
}
test "coerced function body has inequal value with its uncoerced body" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
-
const S = struct {
const A = B(i32, c);
fn c() !i32 {
@@ -513,7 +497,6 @@ test "union in struct captures argument" {
test "function argument tuple used as struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -546,8 +529,8 @@ test "comptime callconv(.c) function ptr uses comptime type argument" {
}
test "call generic function with from function called by the generic function" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const GET = struct {
diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig
index c35fad1d1d..3701a547c9 100644
--- a/test/behavior/globals.zig
+++ b/test/behavior/globals.zig
@@ -6,7 +6,6 @@ var pos = [2]f32{ 0.0, 0.0 };
test "store to global array" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(pos[1] == 0.0);
pos = [2]f32{ 0.0, 1.0 };
@@ -15,9 +14,9 @@ test "store to global array" {
var vpos = @Vector(2, f32){ 0.0, 0.0 };
test "store to global vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(vpos[1] == 0.0);
vpos = @Vector(2, f32){ 0.0, 1.0 };
@@ -26,7 +25,6 @@ test "store to global vector" {
test "slices pointing at the same address as global array." {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -47,7 +45,6 @@ test "slices pointing at the same address as global array." {
test "global loads can affect liveness" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
const ByRef = struct {
@@ -188,6 +185,7 @@ test "function pointer field call on global extern struct, conditional on global
}
test "function pointer field call on global extern struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index bf71025bcd..78151da211 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -116,7 +116,6 @@ test "if prongs cast to expected type instead of peer type resolution" {
test "if peer expressions inferred optional type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -135,7 +134,6 @@ test "if peer expressions inferred optional type" {
test "if-else expression with runtime condition result location is inferred optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const A = struct { b: u64, c: u64 };
@@ -174,6 +172,8 @@ fn returnTrue() bool {
}
test "if value shouldn't be load-elided if used later (structs)" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const Foo = struct { x: i32 };
var a = Foo{ .x = 1 };
@@ -191,6 +191,8 @@ test "if value shouldn't be load-elided if used later (structs)" {
}
test "if value shouldn't be load-elided if used later (optionals)" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
var a: ?i32 = 1;
var b: ?i32 = 1;
diff --git a/test/behavior/import_c_keywords.zig b/test/behavior/import_c_keywords.zig
index dcc8fbad6f..6bea049f85 100644
--- a/test/behavior/import_c_keywords.zig
+++ b/test/behavior/import_c_keywords.zig
@@ -27,7 +27,6 @@ extern fn @"break"() Id;
extern fn an_alias_of_some_non_c_keyword_function() Id;
test "import c keywords" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig
index ba1d3bb6f5..a1efe7ab02 100644
--- a/test/behavior/inline_switch.zig
+++ b/test/behavior/inline_switch.zig
@@ -3,7 +3,6 @@ const expect = std.testing.expect;
const builtin = @import("builtin");
test "inline scalar prongs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: usize = 0;
@@ -18,7 +17,6 @@ test "inline scalar prongs" {
}
test "inline prong ranges" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: usize = 0;
@@ -33,7 +31,6 @@ test "inline prong ranges" {
const E = enum { a, b, c, d };
test "inline switch enums" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: E = .a;
@@ -46,7 +43,6 @@ test "inline switch enums" {
const U = union(E) { a: void, b: u2, c: u3, d: u4 };
test "inline switch unions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -73,7 +69,6 @@ test "inline switch unions" {
}
test "inline else bool" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = true;
@@ -85,7 +80,6 @@ test "inline else bool" {
}
test "inline else error" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Err = error{ a, b, c };
@@ -98,7 +92,6 @@ test "inline else error" {
}
test "inline else enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E2 = enum(u8) { a = 2, b = 3, c = 4, d = 5 };
@@ -111,7 +104,6 @@ test "inline else enum" {
}
test "inline else int with gaps" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -130,7 +122,6 @@ test "inline else int with gaps" {
}
test "inline else int all values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: u2 = 0;
diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig
index f83897318a..eddf74097b 100644
--- a/test/behavior/int128.zig
+++ b/test/behavior/int128.zig
@@ -5,7 +5,6 @@ const minInt = std.math.minInt;
const builtin = @import("builtin");
test "uint128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -25,7 +24,6 @@ test "uint128" {
}
test "undefined 128 bit int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -45,7 +43,6 @@ test "undefined 128 bit int" {
}
test "int128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -68,7 +65,6 @@ test "int128" {
}
test "truncate int128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -92,7 +88,6 @@ test "truncate int128" {
}
test "shift int128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/int_comparison_elision.zig b/test/behavior/int_comparison_elision.zig
index 837e52b50c..2bc55b9f39 100644
--- a/test/behavior/int_comparison_elision.zig
+++ b/test/behavior/int_comparison_elision.zig
@@ -15,7 +15,6 @@ test "int comparison elision" {
// TODO: support int types > 128 bits wide in other backends
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/ir_block_deps.zig b/test/behavior/ir_block_deps.zig
index a46ad2d8a8..4708af87f4 100644
--- a/test/behavior/ir_block_deps.zig
+++ b/test/behavior/ir_block_deps.zig
@@ -18,7 +18,6 @@ fn getErrInt() anyerror!i32 {
}
test "ir block deps" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig
index 99fc9b0b50..92ba8103ca 100644
--- a/test/behavior/lower_strlit_to_vector.zig
+++ b/test/behavior/lower_strlit_to_vector.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
test "strlit to vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 8b541913b9..08ebf3fe22 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -62,11 +62,11 @@ fn assertFalse(b: bool) !void {
}
test "@clz" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+
try testClz();
try comptime testClz();
}
@@ -80,7 +80,6 @@ fn testClz() !void {
}
test "@clz big ints" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -100,8 +99,8 @@ fn testOneClz(comptime T: type, x: T) u32 {
}
test "@clz vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -146,7 +145,6 @@ fn expectVectorsEqual(a: anytype, b: anytype) !void {
}
test "@ctz" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -170,7 +168,7 @@ fn testOneCtz(comptime T: type, x: T) u32 {
}
test "@ctz 128-bit integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -188,8 +186,8 @@ fn testCtz128() !void {
}
test "@ctz vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -227,7 +225,6 @@ test "const number literal" {
const ten = 10;
test "float equality" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -433,7 +430,6 @@ test "binary not" {
}
test "binary not big int <= 128 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -467,7 +463,7 @@ test "binary not big int <= 128 bits" {
}
test "division" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -609,7 +605,6 @@ test "large integer division" {
}
test "division half-precision floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -696,7 +691,6 @@ fn testUnsignedNegationWrappingEval(x: u16) !void {
}
test "negation wrapping" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -749,7 +743,6 @@ fn testShrTrunc(x: u16) !void {
}
test "f128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -778,6 +771,7 @@ fn should_not_be_zero(x: f128) !void {
}
test "umax wrapped squaring" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -834,7 +828,6 @@ test "umax wrapped squaring" {
}
test "128-bit multiplication" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -869,7 +862,6 @@ fn testAddWithOverflow(comptime T: type, a: T, b: T, add: T, bit: u1) !void {
}
test "@addWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -888,7 +880,6 @@ test "@addWithOverflow" {
}
test "@addWithOverflow > 64 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -960,7 +951,7 @@ fn testMulWithOverflow(comptime T: type, a: T, b: T, mul: T, bit: u1) !void {
}
test "basic @mulWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -972,7 +963,7 @@ test "basic @mulWithOverflow" {
}
test "extensive @mulWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1012,11 +1003,9 @@ test "extensive @mulWithOverflow" {
}
test "@mulWithOverflow bitsize > 32" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- // aarch64 fails on a release build of the compiler.
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testMulWithOverflow(u40, 3, 0x55_5555_5555, 0xff_ffff_ffff, 0);
@@ -1043,9 +1032,9 @@ test "@mulWithOverflow bitsize > 32" {
}
test "@mulWithOverflow bitsize 128 bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1070,6 +1059,7 @@ test "@mulWithOverflow bitsize 128 bits" {
}
test "@mulWithOverflow bitsize 256 bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1116,7 +1106,6 @@ fn testSubWithOverflow(comptime T: type, a: T, b: T, sub: T, bit: u1) !void {
}
test "@subWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1133,7 +1122,6 @@ test "@subWithOverflow" {
}
test "@subWithOverflow > 64 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1181,7 +1169,7 @@ fn testShlWithOverflow(comptime T: type, a: T, b: math.Log2Int(T), shl: T, bit:
}
test "@shlWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1200,7 +1188,7 @@ test "@shlWithOverflow" {
}
test "@shlWithOverflow > 64 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1269,7 +1257,6 @@ test "allow signed integer division/remainder when values are comptime-known and
}
test "quad hex float literal parsing accurate" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1427,8 +1414,8 @@ test "comptime float rem int" {
}
test "remainder division" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -1467,7 +1454,6 @@ fn remdivOne(comptime T: type, a: T, b: T, c: T) !void {
test "float remainder division using @rem" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1508,8 +1494,8 @@ fn fremOne(comptime T: type, a: T, b: T, c: T, epsilon: T) !void {
}
test "float modulo division using @mod" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -1552,7 +1538,6 @@ fn fmodOne(comptime T: type, a: T, b: T, c: T, epsilon: T) !void {
test "@round f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1562,7 +1547,6 @@ test "@round f16" {
test "@round f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1579,7 +1563,6 @@ test "@round f32/f64" {
test "@round f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -1591,7 +1574,6 @@ test "@round f80" {
test "@round f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -1608,9 +1590,9 @@ fn testRound(comptime T: type, x: T) !void {
}
test "vector integer addition" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1632,7 +1614,6 @@ test "vector integer addition" {
test "NaN comparison" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1650,7 +1631,6 @@ test "NaN comparison" {
test "NaN comparison f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1672,9 +1652,9 @@ fn testNanEqNan(comptime F: type) !void {
}
test "vector comparison" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1705,7 +1685,6 @@ test "compare undefined literal with comptime_int" {
test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1737,7 +1716,6 @@ test "signed zeros are represented properly" {
test "absFloat" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testAbsFloat();
@@ -1767,8 +1745,8 @@ test "mod lazy values" {
}
test "@clz works on both vector and scalar inputs" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1786,7 +1764,6 @@ test "@clz works on both vector and scalar inputs" {
test "runtime comparison to NaN is comptime-known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1816,7 +1793,6 @@ test "runtime comparison to NaN is comptime-known" {
test "runtime int comparison to inf is comptime-known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1845,8 +1821,8 @@ test "runtime int comparison to inf is comptime-known" {
}
test "float divide by zero" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1880,8 +1856,8 @@ test "float divide by zero" {
}
test "partially-runtime integer vector division would be illegal if vector elements were reordered" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1909,8 +1885,8 @@ test "partially-runtime integer vector division would be illegal if vector eleme
}
test "float vector division of comptime zero by runtime nan is nan" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1927,8 +1903,8 @@ test "float vector division of comptime zero by runtime nan is nan" {
}
test "float vector multiplication of comptime zero by runtime nan is nan" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1945,7 +1921,6 @@ test "float vector multiplication of comptime zero by runtime nan is nan" {
test "comptime float vector division of zero by nan is nan" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1961,7 +1936,6 @@ test "comptime float vector division of zero by nan is nan" {
test "comptime float vector multiplication of zero by nan is nan" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig
index b34df4b784..0f4611eb30 100644
--- a/test/behavior/maximum_minimum.zig
+++ b/test/behavior/maximum_minimum.zig
@@ -7,7 +7,6 @@ const expectEqual = std.testing.expectEqual;
test "@max" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -28,9 +27,9 @@ test "@max" {
}
test "@max on vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -61,7 +60,6 @@ test "@max on vectors" {
}
test "@min" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -83,8 +81,8 @@ test "@min" {
}
test "@min for vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -116,7 +114,6 @@ test "@min for vectors" {
}
test "@min/max for floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -167,8 +164,8 @@ test "@min/@max more than two arguments" {
}
test "@min/@max more than two vector arguments" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -181,7 +178,6 @@ test "@min/@max more than two vector arguments" {
}
test "@min/@max notices bounds" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -198,8 +194,8 @@ test "@min/@max notices bounds" {
}
test "@min/@max notices vector bounds" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -217,7 +213,6 @@ test "@min/@max notices vector bounds" {
}
test "@min/@max on comptime_int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -231,7 +226,6 @@ test "@min/@max on comptime_int" {
}
test "@min/@max notices bounds from types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -251,8 +245,8 @@ test "@min/@max notices bounds from types" {
}
test "@min/@max notices bounds from vector types" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -273,7 +267,6 @@ test "@min/@max notices bounds from vector types" {
}
test "@min/@max notices bounds from types when comptime-known value is undef" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -293,8 +286,8 @@ test "@min/@max notices bounds from types when comptime-known value is undef" {
}
test "@min/@max notices bounds from vector types when element of comptime-known vector is undef" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -333,7 +326,6 @@ test "@min/@max of signed and unsigned runtime integers" {
}
test "@min resulting in u0" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -364,8 +356,8 @@ test "@min/@max with runtime signed and unsigned integers of same size" {
}
test "@min/@max with runtime vectors of signed and unsigned integers of same size" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig
index e9f416e74e..3626e391ac 100644
--- a/test/behavior/member_func.zig
+++ b/test/behavior/member_func.zig
@@ -28,7 +28,6 @@ const HasFuncs = struct {
test "standard field calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -72,7 +71,6 @@ test "standard field calls" {
test "@field field calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/memcpy.zig b/test/behavior/memcpy.zig
index 38545f3a53..c35492e316 100644
--- a/test/behavior/memcpy.zig
+++ b/test/behavior/memcpy.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
const assert = std.debug.assert;
test "memcpy and memset intrinsics" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -26,7 +25,6 @@ fn testMemcpyMemset() !void {
}
test "@memcpy with both operands single-ptr-to-array, one is null-terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -47,7 +45,6 @@ fn testMemcpyBothSinglePtrArrayOneIsNullTerminated() !void {
}
test "@memcpy dest many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -70,7 +67,6 @@ fn testMemcpyDestManyPtr() !void {
}
test "@memcpy C pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -156,6 +152,7 @@ test "@memcpy zero-bit type with aliasing" {
}
test "@memcpy with sentinel" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
diff --git a/test/behavior/memmove.zig b/test/behavior/memmove.zig
index 36af982c84..d0e5e5941c 100644
--- a/test/behavior/memmove.zig
+++ b/test/behavior/memmove.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "memmove and memset intrinsics" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -32,7 +31,6 @@ fn testMemmoveMemset() !void {
}
test "@memmove with both operands single-ptr-to-array, one is null-terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -77,7 +75,6 @@ fn testMemmoveBothSinglePtrArrayOneIsNullTerminated() !void {
}
test "@memmove dest many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig
index 33a441da20..0a15dc3cce 100644
--- a/test/behavior/memset.zig
+++ b/test/behavior/memset.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@memset on array pointers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -32,7 +31,6 @@ fn testMemsetArray() !void {
}
test "@memset on slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -68,7 +66,6 @@ fn testMemsetSlice() !void {
}
test "memset with bool element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -80,7 +77,6 @@ test "memset with bool element" {
}
test "memset with 1-byte struct element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -94,7 +90,6 @@ test "memset with 1-byte struct element" {
}
test "memset with 1-byte array element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -145,7 +140,6 @@ test "memset with large array element, comptime known" {
}
test "@memset provides result type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -164,7 +158,6 @@ test "@memset provides result type" {
}
test "zero keys with @memset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index 210485b239..5ea6e92157 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "@mulAdd" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -32,7 +31,6 @@ fn testMulAdd() !void {
test "@mulAdd f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -51,7 +49,6 @@ fn testMulAdd16() !void {
test "@mulAdd f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -71,7 +68,6 @@ fn testMulAdd80() !void {
test "@mulAdd f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -103,9 +99,9 @@ fn vector16() !void {
}
test "vector f16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -128,9 +124,9 @@ fn vector32() !void {
}
test "vector f32" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -153,9 +149,9 @@ fn vector64() !void {
}
test "vector f64" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -177,9 +173,9 @@ fn vector80() !void {
}
test "vector f80" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -203,9 +199,9 @@ fn vector128() !void {
}
test "vector f128" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
diff --git a/test/behavior/multiple_externs_with_conflicting_types.zig b/test/behavior/multiple_externs_with_conflicting_types.zig
index 7b3453695c..787a18a78f 100644
--- a/test/behavior/multiple_externs_with_conflicting_types.zig
+++ b/test/behavior/multiple_externs_with_conflicting_types.zig
@@ -11,7 +11,6 @@ comptime {
const builtin = @import("builtin");
test "call extern function defined with conflicting type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/nan.zig b/test/behavior/nan.zig
index 40c1b12850..b96fe1a649 100644
--- a/test/behavior/nan.zig
+++ b/test/behavior/nan.zig
@@ -23,7 +23,6 @@ const snan_f128: f128 = math.snan(f128);
test "nan memory equality" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index 63acb07835..7b6806428d 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -29,8 +29,8 @@ test "optional type" {
}
test "test maybe object and get a pointer to the inner value" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -51,7 +51,6 @@ test "rhs maybe unwrap return" {
test "maybe return" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try maybeReturnImpl();
@@ -140,8 +139,8 @@ test "optional pointer to 0 bit type null value at runtime" {
}
test "if var maybe pointer" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -185,7 +184,6 @@ const here_is_a_null_literal = SillyStruct{ .context = null };
test "unwrap optional which is field of global var" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
struct_with_optional.field = null;
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index cbd82475a4..11d4ee0537 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -59,6 +59,7 @@ fn testNullPtrsEql() !void {
}
test "optional with zero-bit type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -109,7 +110,6 @@ test "optional with zero-bit type" {
}
test "address of unwrap optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -132,7 +132,6 @@ test "address of unwrap optional" {
}
test "nested optional field in struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -210,6 +209,7 @@ test "equality compare optionals and non-optionals" {
}
test "compare optionals with modified payloads" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var lhs: ?bool = false;
@@ -319,7 +319,6 @@ test "assigning to an unwrapped optional field in an inline loop" {
}
test "coerce an anon struct literal to optional struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -339,7 +338,6 @@ test "coerce an anon struct literal to optional struct" {
}
test "0-bit child type coerced to optional return ptr result location" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -365,6 +363,7 @@ test "0-bit child type coerced to optional return ptr result location" {
}
test "0-bit child type coerced to optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -391,7 +390,7 @@ test "0-bit child type coerced to optional" {
}
test "array of optional unaligned types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -428,8 +427,8 @@ test "array of optional unaligned types" {
}
test "optional pointer to zero bit optional payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -448,7 +447,6 @@ test "optional pointer to zero bit optional payload" {
test "optional pointer to zero bit error union payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -543,7 +541,6 @@ test "alignment of wrapping an optional payload" {
}
test "Optional slice size is optimized" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -556,7 +553,7 @@ test "Optional slice size is optimized" {
}
test "Optional slice passed to function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -574,7 +571,6 @@ test "Optional slice passed to function" {
}
test "peer type resolution in nested if expressions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Thing = struct { n: i32 };
@@ -623,8 +619,8 @@ test "variable of optional of noreturn" {
}
test "copied optional doesn't alias source" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -637,8 +633,8 @@ test "copied optional doesn't alias source" {
}
test "result location initialization of optional with OPV payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index b7ba14a4b2..2d057e21df 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -120,7 +120,6 @@ test "consistent size of packed structs" {
}
test "correct sizeOf and offsets in packed structs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -187,7 +186,6 @@ test "correct sizeOf and offsets in packed structs" {
}
test "nested packed structs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -484,7 +482,6 @@ test "load pointer from packed struct" {
}
test "@intFromPtr on a packed struct field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -507,7 +504,6 @@ test "@intFromPtr on a packed struct field" {
}
test "@intFromPtr on a packed struct field unaligned and nested" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -617,6 +613,7 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
}
test "packed struct fields modification" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/16615
@@ -656,9 +653,9 @@ test "optional pointer in packed struct" {
}
test "nested packed struct field access test" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO packed structs larger than 64 bits
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -777,6 +774,7 @@ test "nested packed struct field access test" {
}
test "nested packed struct at non-zero offset" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -915,7 +913,6 @@ test "packed struct passed to callconv(.c) function" {
}
test "overaligned pointer to packed struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -937,7 +934,7 @@ test "overaligned pointer to packed struct" {
}
test "packed struct initialized in bitcast" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -986,8 +983,8 @@ test "store undefined to packed result location" {
try expectEqual(x, s.x);
}
+// Originally reported at https://github.com/ziglang/zig/issues/9914
test "bitcast back and forth" {
- // Originally reported at https://github.com/ziglang/zig/issues/9914
const S = packed struct { one: u6, two: u1 };
const s = S{ .one = 0b110101, .two = 0b1 };
const u: u7 = @bitCast(s);
@@ -996,8 +993,9 @@ test "bitcast back and forth" {
try expect(s.two == s2.two);
}
+// Originally reported at https://github.com/ziglang/zig/issues/14200
test "field access of packed struct smaller than its abi size inside struct initialized with rls" {
- // Originally reported at https://github.com/ziglang/zig/issues/14200
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1015,8 +1013,8 @@ test "field access of packed struct smaller than its abi size inside struct init
try expect(@as(i2, 1) == s.ps.y);
}
+// Originally reported at https://github.com/ziglang/zig/issues/14632
test "modify nested packed struct aligned field" {
- // Originally reported at https://github.com/ziglang/zig/issues/14632
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
@@ -1045,10 +1043,10 @@ test "modify nested packed struct aligned field" {
try std.testing.expect(!opts.baz);
}
+// Originally reported at https://github.com/ziglang/zig/issues/9674
test "assigning packed struct inside another packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- // Originally reported at https://github.com/ziglang/zig/issues/9674
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -1078,7 +1076,6 @@ test "assigning packed struct inside another packed struct" {
}
test "packed struct used as part of anon decl name" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1104,7 +1101,13 @@ test "packed struct acts as a namespace" {
}
test "pointer loaded correctly from packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+
+ if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // crashes MSVC
const RAM = struct {
data: [0xFFFF + 1]u8,
@@ -1132,12 +1135,6 @@ test "pointer loaded correctly from packed struct" {
}
}
};
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-
- if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // crashes MSVC
var ram = try RAM.new();
var cpu = try CPU.new(&ram);
@@ -1146,7 +1143,7 @@ test "pointer loaded correctly from packed struct" {
}
test "assignment to non-byte-aligned field in packed struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1167,7 +1164,6 @@ test "assignment to non-byte-aligned field in packed struct" {
}
test "packed struct field pointer aligned properly" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1186,7 +1182,7 @@ test "packed struct field pointer aligned properly" {
}
test "load flag from packed struct in union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1311,6 +1307,7 @@ test "packed struct equality" {
}
test "packed struct equality ignores padding bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1322,6 +1319,8 @@ test "packed struct equality ignores padding bits" {
}
test "packed struct with signed field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
var s: packed struct {
a: i2,
b: u6,
@@ -1332,6 +1331,7 @@ test "packed struct with signed field" {
}
test "assign packed struct initialized with RLS to packed struct literal field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1349,6 +1349,7 @@ test "assign packed struct initialized with RLS to packed struct literal field"
}
test "byte-aligned packed relocation" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1363,6 +1364,7 @@ test "byte-aligned packed relocation" {
}
test "packed struct store of comparison result" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/packed-union.zig b/test/behavior/packed-union.zig
index 9d8235a021..17cfa82cd7 100644
--- a/test/behavior/packed-union.zig
+++ b/test/behavior/packed-union.zig
@@ -99,10 +99,10 @@ fn testFlagsInPackedUnionAtOffset() !void {
try expectEqual(false, test_bits.adv_flags.adv.flags.enable_2);
}
+// Originally reported at https://github.com/ziglang/zig/issues/16581
test "packed union in packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- // Originally reported at https://github.com/ziglang/zig/issues/16581
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try testPackedUnionInPackedStruct();
@@ -136,7 +136,7 @@ fn testPackedUnionInPackedStruct() !void {
}
test "packed union initialized with a runtime value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig
index 215c3efd38..84255d0585 100644
--- a/test/behavior/packed_struct_explicit_backing_int.zig
+++ b/test/behavior/packed_struct_explicit_backing_int.zig
@@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual;
const native_endian = builtin.cpu.arch.endian();
test "packed struct explicit backing integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index d98c1a73d9..2a786ecdb6 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -18,7 +18,6 @@ fn testDerefPtr() !void {
}
test "pointer-integer arithmetic" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -232,7 +231,6 @@ test "peer type resolution with C pointer and const pointer" {
test "implicit casting between C pointer and optional non-C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -248,8 +246,8 @@ test "implicit casting between C pointer and optional non-C pointer" {
}
test "implicit cast error unions with non-optional to optional pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -298,7 +296,6 @@ test "allowzero pointer and slice" {
test "assign null directly to C pointer and test null equality" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -366,7 +363,6 @@ test "array initialization types" {
}
test "null terminated pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -384,7 +380,6 @@ test "null terminated pointer" {
}
test "allow any sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -401,7 +396,6 @@ test "allow any sentinel" {
}
test "pointer sentinel with enums" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -442,7 +436,6 @@ test "pointer sentinel with optional element" {
}
test "pointer sentinel with +inf" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -515,7 +508,6 @@ test "@intFromPtr on null optional at comptime" {
}
test "indexing array with sentinel returns correct type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -525,7 +517,6 @@ test "indexing array with sentinel returns correct type" {
}
test "element pointer to slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -548,7 +539,6 @@ test "element pointer to slice" {
}
test "element pointer arithmetic to slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -604,7 +594,6 @@ test "pointer to constant decl preserves alignment" {
test "ptrCast comptime known slice to C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -625,7 +614,6 @@ test "pointer alignment and element type include call expression" {
}
test "pointer to array has explicit alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index d5e6af9b3b..8286e25921 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
test "@popCount integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -15,7 +14,7 @@ test "@popCount integers" {
}
test "@popCount 128bit integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -77,8 +76,8 @@ fn testPopCountIntegers() !void {
}
test "@popCount vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index 212ab5bde1..7b6e6edbb8 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -22,7 +22,6 @@ fn testReinterpretBytesAsInteger() !void {
test "reinterpret an array over multiple elements, with no well-defined layout" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -56,7 +55,6 @@ fn testReinterpretStructWrappedBytesAsInteger() !void {
}
test "reinterpret bytes of an array into an extern struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -130,7 +128,6 @@ fn testReinterpretOverAlignedExternStructAsExternStruct() !void {
test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Test lowering a field ptr
@@ -152,7 +149,6 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
test "lower reinterpreted comptime field ptr" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Test lowering a field ptr
@@ -174,7 +170,6 @@ test "lower reinterpreted comptime field ptr" {
test "reinterpret struct field at comptime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const numNative = comptime Bytes.init(0x12345678);
@@ -232,7 +227,6 @@ test "ptrcast of const integer has the correct object size" {
test "implicit optional pointer to optional anyopaque pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [4]u8 = "aoeu".*;
@@ -244,7 +238,6 @@ test "implicit optional pointer to optional anyopaque pointer" {
test "@ptrCast slice to slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -262,7 +255,6 @@ test "@ptrCast slice to slice" {
test "comptime @ptrCast a subset of an array, then write through it" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -354,7 +346,6 @@ test "@ptrCast restructures sliced comptime-only array" {
test "@ptrCast slice multiplying length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -372,7 +363,6 @@ test "@ptrCast slice multiplying length" {
test "@ptrCast array pointer to slice multiplying length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -390,7 +380,6 @@ test "@ptrCast array pointer to slice multiplying length" {
test "@ptrCast slice dividing length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -408,7 +397,6 @@ test "@ptrCast slice dividing length" {
test "@ptrCast array pointer to slice dividing length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -426,7 +414,6 @@ test "@ptrCast array pointer to slice dividing length" {
test "@ptrCast slice with complex length increase" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -447,7 +434,6 @@ test "@ptrCast slice with complex length increase" {
test "@ptrCast array pointer to slice with complex length increase" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -468,7 +454,6 @@ test "@ptrCast array pointer to slice with complex length increase" {
test "@ptrCast slice with complex length decrease" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -489,7 +474,6 @@ test "@ptrCast slice with complex length decrease" {
test "@ptrCast array pointer to slice with complex length decrease" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -510,7 +494,6 @@ test "@ptrCast array pointer to slice with complex length decrease" {
test "@ptrCast slice of zero-bit type to different slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -530,7 +513,6 @@ test "@ptrCast slice of zero-bit type to different slice" {
test "@ptrCast single-item pointer to slice with length 1" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -552,7 +534,6 @@ test "@ptrCast single-item pointer to slice with length 1" {
test "@ptrCast single-item pointer to slice of bytes" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -571,3 +552,13 @@ test "@ptrCast single-item pointer to slice of bytes" {
try comptime S.doTheTest(void, &{});
try comptime S.doTheTest(struct { x: u32 }, &.{ .x = 123 });
}
+
+test "@ptrCast array pointer removing sentinel" {
+ const in: *const [4:0]u8 = &.{ 1, 2, 3, 4 };
+ const out: []const i8 = @ptrCast(in);
+ comptime assert(out.len == 4);
+ comptime assert(out[0] == 1);
+ comptime assert(out[1] == 2);
+ comptime assert(out[2] == 3);
+ comptime assert(out[3] == 4);
+}
diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig
index 07703298cc..f72e64f87f 100644
--- a/test/behavior/ptrfromint.zig
+++ b/test/behavior/ptrfromint.zig
@@ -17,7 +17,6 @@ fn addressToFunction() void {
test "mutate through ptr initialized with constant ptrFromInt value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -35,7 +34,6 @@ fn forceCompilerAnalyzeBranchHardCodedPtrDereference(x: bool) void {
test "@ptrFromInt creates null pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -45,7 +43,6 @@ test "@ptrFromInt creates null pointer" {
test "@ptrFromInt creates allowzero zero pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const ptr = @as(*allowzero u32, @ptrFromInt(0));
diff --git a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
index 0fa079c96b..95448a382c 100644
--- a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
+++ b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
@@ -6,7 +6,6 @@ const mem = std.mem;
var ok: bool = false;
test "reference a variable in an if after an if in the 2nd switch prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index 02bc0c9e4b..3f9e6f35eb 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -26,7 +26,6 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
}
test "reflection: @field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var f = Foo{
diff --git a/test/behavior/return_address.zig b/test/behavior/return_address.zig
index 6f603ff632..d7fb76d3b0 100644
--- a/test/behavior/return_address.zig
+++ b/test/behavior/return_address.zig
@@ -6,7 +6,6 @@ fn retAddr() usize {
}
test "return address" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig
index 1abd5b4dab..900dee8811 100644
--- a/test/behavior/saturating_arithmetic.zig
+++ b/test/behavior/saturating_arithmetic.zig
@@ -5,7 +5,7 @@ const maxInt = std.math.maxInt;
const expect = std.testing.expect;
test "saturating add" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -52,8 +52,8 @@ test "saturating add" {
}
test "saturating add 128bit" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -79,7 +79,7 @@ test "saturating add 128bit" {
}
test "saturating subtraction" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -125,8 +125,8 @@ test "saturating subtraction" {
}
test "saturating subtraction 128bit" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -158,7 +158,6 @@ fn testSatMul(comptime T: type, a: T, b: T, expected: T) !void {
}
test "saturating multiplication <= 32 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -229,6 +228,7 @@ test "saturating multiplication <= 32 bits" {
}
test "saturating mul i64, i128" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -256,8 +256,8 @@ test "saturating mul i64, i128" {
}
test "saturating multiplication" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -295,7 +295,7 @@ test "saturating multiplication" {
}
test "saturating shift-left" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -340,6 +340,7 @@ test "saturating shift-left" {
}
test "saturating shift-left large rhs" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -357,7 +358,7 @@ test "saturating shift-left large rhs" {
}
test "saturating shl uses the LHS type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/select.zig b/test/behavior/select.zig
index e6bd03b3e4..540f686b28 100644
--- a/test/behavior/select.zig
+++ b/test/behavior/select.zig
@@ -4,9 +4,9 @@ const mem = std.mem;
const expect = std.testing.expect;
test "@select vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -35,9 +35,9 @@ fn selectVectors() !void {
}
test "@select arrays" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -66,6 +66,7 @@ fn selectArrays() !void {
}
test "@select compare result" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig
index 08523eb04c..cff85a3c66 100644
--- a/test/behavior/shuffle.zig
+++ b/test/behavior/shuffle.zig
@@ -5,7 +5,7 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
test "@shuffle int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -50,8 +50,8 @@ test "@shuffle int" {
}
test "@shuffle int strange sizes" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -132,8 +132,8 @@ fn testShuffle(
}
test "@shuffle bool 1" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -155,8 +155,8 @@ test "@shuffle bool 1" {
}
test "@shuffle bool 2" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index b5f7e966f0..4ce2d3a24e 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -270,7 +270,6 @@ test "bitSizeOf comptime_int" {
}
test "runtime instructions inside typeof in comptime only scope" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -326,7 +325,6 @@ test "lazy abi size used in comparison" {
}
test "peer type resolution with @TypeOf doesn't trigger dependency loop check" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -437,7 +435,6 @@ test "Peer resolution of extern function calls in @TypeOf" {
}
test "Extern function calls, dereferences and field access in @TypeOf" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Test = struct {
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 22031fe698..d58cf79276 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -211,7 +211,6 @@ test "comptime pointer cast array and then slice" {
}
test "slicing zero length array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -273,7 +272,6 @@ test "result location zero sized array inside struct field implicit cast to slic
}
test "runtime safety lets us slice from len..len" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -287,7 +285,6 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "C pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -299,7 +296,6 @@ test "C pointer" {
}
test "C pointer slice access" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -331,7 +327,6 @@ fn sliceSum(comptime q: []const u8) i32 {
}
test "slice type with custom alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -390,7 +385,6 @@ test "empty array to slice" {
}
test "@ptrCast slice to pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -445,7 +439,6 @@ test "slice multi-pointer without end" {
}
test "slice syntax resulting in pointer-to-array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -665,7 +658,6 @@ test "slice syntax resulting in pointer-to-array" {
}
test "slice pointer-to-array null terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -718,7 +710,7 @@ test "slice pointer-to-array zero length" {
}
test "type coercion of pointer to anon struct literal to pointer to slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -812,7 +804,6 @@ test "slice sentinel access at comptime" {
}
test "slicing array with sentinel as end index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -831,7 +822,6 @@ test "slicing array with sentinel as end index" {
}
test "slicing slice with sentinel as end index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -888,7 +878,7 @@ test "slice field ptr var" {
}
test "global slice field access" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -929,7 +919,6 @@ test "slice with dereferenced value" {
}
test "empty slice ptr is non null" {
- if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // Test assumes `undefined` is non-zero
{
@@ -947,7 +936,6 @@ test "empty slice ptr is non null" {
}
test "slice decays to many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -957,7 +945,6 @@ test "slice decays to many pointer" {
}
test "write through pointer to optional slice arg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -977,7 +964,6 @@ test "write through pointer to optional slice arg" {
}
test "modify slice length at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -994,7 +980,6 @@ test "modify slice length at comptime" {
}
test "slicing zero length array field of struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1010,7 +995,6 @@ test "slicing zero length array field of struct" {
}
test "slicing slices gives correct result" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1024,7 +1008,6 @@ test "slicing slices gives correct result" {
}
test "get address of element of zero-sized slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1037,7 +1020,6 @@ test "get address of element of zero-sized slice" {
}
test "sentinel-terminated 0-length slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1058,8 +1040,6 @@ test "sentinel-terminated 0-length slices" {
}
test "peer slices keep abi alignment with empty struct" {
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-
var cond: bool = undefined;
cond = false;
const slice = if (cond) &[1]u32{42} else &.{};
diff --git a/test/behavior/src.zig b/test/behavior/src.zig
index d6f3407e13..61c0467eb7 100644
--- a/test/behavior/src.zig
+++ b/test/behavior/src.zig
@@ -16,7 +16,6 @@ const expect = std.testing.expect;
const expectEqualStrings = std.testing.expectEqualStrings;
test "@src" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/string_literals.zig b/test/behavior/string_literals.zig
index 5701f94e07..c1e89bacb2 100644
--- a/test/behavior/string_literals.zig
+++ b/test/behavior/string_literals.zig
@@ -6,7 +6,6 @@ const tag_name = @tagName(TestEnum.TestEnumValue);
const ptr_tag_name: [*:0]const u8 = tag_name;
test "@tagName() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -20,7 +19,6 @@ const error_name = @errorName(TestError.TestErrorCode);
const ptr_error_name: [*:0]const u8 = error_name;
test "@errorName() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -34,7 +32,6 @@ const type_name = @typeName(TestType);
const ptr_type_name: [*:0]const u8 = type_name;
test "@typeName() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -48,7 +45,6 @@ const ptr_actual_contents: [*:0]const u8 = actual_contents;
const expected_contents = "hello zig\n";
test "@embedFile() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -64,7 +60,7 @@ fn testFnForSrc() std.builtin.SourceLocation {
}
test "@src() returns a struct containing 0-terminated string slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 4a66c56cea..3c4c4d7f80 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -10,7 +10,6 @@ const maxInt = std.math.maxInt;
top_level_field: i32,
test "top level fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var instance = @This(){
@@ -87,7 +86,6 @@ const StructFoo = struct {
};
test "structs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -200,7 +198,6 @@ const MemberFnRand = struct {
};
test "return struct byval from function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Bar = struct {
@@ -237,7 +234,6 @@ test "call method with mutable reference to struct with no fields" {
}
test "struct field init with catch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -280,7 +276,6 @@ const Val = struct {
};
test "struct point to self" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -297,7 +292,6 @@ test "struct point to self" {
}
test "void struct fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const foo = VoidStructFieldsFoo{
@@ -335,7 +329,6 @@ fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
}
test "self-referencing struct via array member" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -492,7 +485,7 @@ const Bitfields = packed struct {
};
test "packed struct fields are ordered from LSB to MSB" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -610,7 +603,6 @@ fn getC(data: *const BitField1) u2 {
}
test "default struct initialization fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -634,8 +626,8 @@ test "default struct initialization fields" {
}
test "packed array 24bits" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -701,8 +693,8 @@ const FooArrayOfAligned = packed struct {
};
test "pointer to packed struct member in a stack variable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -731,7 +723,6 @@ test "packed struct with u0 field access" {
}
test "access to global struct fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
g_foo.bar.value = 42;
@@ -753,8 +744,8 @@ const S0 = struct {
var g_foo: S0 = S0.init();
test "packed struct with fp fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -783,7 +774,6 @@ test "packed struct with fp fields" {
test "fn with C calling convention returns struct by value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -807,7 +797,6 @@ test "fn with C calling convention returns struct by value" {
}
test "non-packed struct with u128 entry in union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -834,8 +823,8 @@ test "non-packed struct with u128 entry in union" {
}
test "packed struct field passed to generic function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -859,7 +848,6 @@ test "packed struct field passed to generic function" {
}
test "anonymous struct literal syntax" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -951,7 +939,6 @@ test "comptime struct field" {
test "tuple element initialized with fn call" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -968,8 +955,8 @@ test "tuple element initialized with fn call" {
}
test "struct with union field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -990,7 +977,6 @@ test "struct with union field" {
}
test "struct with 0-length union array field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1078,7 +1064,7 @@ test "for loop over pointers to struct, getting field from struct pointer" {
}
test "anon init through error unions and optionals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1105,7 +1091,6 @@ test "anon init through error unions and optionals" {
}
test "anon init through optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1125,7 +1110,6 @@ test "anon init through optional" {
}
test "anon init through error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1145,7 +1129,7 @@ test "anon init through error union" {
}
test "typed init through error unions and optionals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1171,7 +1155,6 @@ test "typed init through error unions and optionals" {
}
test "initialize struct with empty literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct { x: i32 = 1234 };
@@ -1206,7 +1189,7 @@ test "loading a struct pointer perfoms a copy" {
}
test "packed struct aggregate init" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1226,7 +1209,7 @@ test "packed struct aggregate init" {
}
test "packed struct field access via pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1267,7 +1250,6 @@ test "store to comptime field" {
}
test "struct field init value is size of the struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const namespace = struct {
@@ -1282,7 +1264,6 @@ test "struct field init value is size of the struct" {
}
test "under-aligned struct field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1306,7 +1287,6 @@ test "under-aligned struct field" {
}
test "fieldParentPtr of a zero-bit field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1357,7 +1337,6 @@ test "fieldParentPtr of a zero-bit field" {
test "struct field has a pointer to an aligned version of itself" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const E = struct {
next: *align(1) @This(),
@@ -1415,7 +1394,6 @@ test "struct has only one reference" {
}
test "no dependency loop on pointer to optional struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1437,7 +1415,6 @@ test "discarded struct initialization works as expected" {
}
test "function pointer in struct returns the struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const A = struct {
@@ -1455,7 +1432,6 @@ test "function pointer in struct returns the struct" {
test "no dependency loop on optional field wrapped in generic function" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn Atomic(comptime T: type) type {
@@ -1473,7 +1449,6 @@ test "no dependency loop on optional field wrapped in generic function" {
}
test "optional field init with tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@@ -1488,8 +1463,6 @@ test "optional field init with tuple" {
}
test "if inside struct init inside if" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const MyStruct = struct { x: u32 };
const b: u32 = 5;
var i: u32 = 1;
@@ -1580,7 +1553,6 @@ test "instantiate struct with comptime field" {
test "struct field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1610,7 +1582,6 @@ test "struct field pointer has correct alignment" {
test "extern struct field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1828,7 +1799,6 @@ test "tuple with comptime-only field" {
}
test "extern struct fields are aligned to 1" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Foo = extern struct {
@@ -1845,7 +1815,7 @@ test "extern struct fields are aligned to 1" {
}
test "assign to slice.len of global variable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1897,7 +1867,6 @@ test "array of structs inside struct initialized with undefined" {
}
test "runtime call in nested initializer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1929,7 +1898,6 @@ test "runtime call in nested initializer" {
}
test "runtime value in nested initializer passed as pointer to function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Bar = struct {
@@ -1953,7 +1921,7 @@ test "runtime value in nested initializer passed as pointer to function" {
}
test "struct field default value is a call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2001,7 +1969,6 @@ test "aggregate initializers should allow initializing comptime fields, verifyin
test "assignment of field with padding" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Mesh = extern struct {
@@ -2031,7 +1998,6 @@ test "assignment of field with padding" {
test "initiate global variable with runtime value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -2126,6 +2092,7 @@ test "anonymous struct equivalence" {
}
test "field access through mem ptr arg" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
diff --git a/test/behavior/struct_contains_null_ptr_itself.zig b/test/behavior/struct_contains_null_ptr_itself.zig
index 8941f1bde4..15c14cc3d2 100644
--- a/test/behavior/struct_contains_null_ptr_itself.zig
+++ b/test/behavior/struct_contains_null_ptr_itself.zig
@@ -3,7 +3,6 @@ const expect = std.testing.expect;
const builtin = @import("builtin");
test "struct contains null pointer which contains original struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/struct_contains_slice_of_itself.zig b/test/behavior/struct_contains_slice_of_itself.zig
index 2001b4cfaa..a54049d123 100644
--- a/test/behavior/struct_contains_slice_of_itself.zig
+++ b/test/behavior/struct_contains_slice_of_itself.zig
@@ -12,6 +12,7 @@ const NodeAligned = struct {
};
test "struct contains slice of itself" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -52,7 +53,7 @@ test "struct contains slice of itself" {
}
test "struct contains aligned slice of itself" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 873ea1f1da..0d0cb0e4f4 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -43,7 +43,7 @@ fn testSwitchWithAllRanges(x: u32, y: u32) u32 {
}
test "switch arbitrary int size" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -274,8 +274,8 @@ const SwitchProngWithVarEnum = union(enum) {
};
test "switch prong with variable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -300,7 +300,6 @@ fn switchProngWithVarFn(a: SwitchProngWithVarEnum) !void {
test "switch on enum using pointer capture" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testSwitchEnumPtrCapture();
@@ -361,7 +360,6 @@ fn testSwitchHandleAllCasesRange(x: u8) u8 {
test "switch on union with some prongs capturing" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const X = union(enum) {
@@ -398,7 +396,6 @@ test "switch on const enum with var" {
}
test "anon enum literal used in switch on union enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Foo = union(enum) {
@@ -469,8 +466,8 @@ test "switch on integer with else capturing expr" {
}
test "else prong of switch on error set excludes other cases" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -505,8 +502,8 @@ test "else prong of switch on error set excludes other cases" {
}
test "switch prongs with error set cases make a new error set type for capture value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -563,7 +560,6 @@ test "return result loc and then switch with range implicit casted to error unio
test "switch with null and T peer types and inferred result location type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -582,7 +578,7 @@ test "switch with null and T peer types and inferred result location type" {
}
test "switch prongs with cases with identical payload types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -689,7 +685,7 @@ test "switch prong pointer capture alignment" {
}
test "switch on pointer type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -737,8 +733,8 @@ test "switch on error set with single else" {
}
test "switch capture copies its payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -831,6 +827,7 @@ test "comptime inline switch" {
}
test "switch capture peer type resolution" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = union(enum) {
@@ -848,6 +845,8 @@ test "switch capture peer type resolution" {
}
test "switch capture peer type resolution for in-memory coercible payloads" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const T1 = c_int;
const T2 = @Type(@typeInfo(T1));
@@ -868,6 +867,8 @@ test "switch capture peer type resolution for in-memory coercible payloads" {
}
test "switch pointer capture peer type resolution" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const T1 = c_int;
const T2 = @Type(@typeInfo(T1));
@@ -904,6 +905,7 @@ test "inline switch range that includes the maximum value of the switched type"
}
test "nested break ignores switch conditions and breaks instead" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -926,6 +928,7 @@ test "nested break ignores switch conditions and breaks instead" {
}
test "peer type resolution on switch captures ignores unused payload bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -951,7 +954,6 @@ test "peer type resolution on switch captures ignores unused payload bits" {
}
test "switch prong captures range" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1042,7 +1044,7 @@ test "labeled switch with break" {
}
test "unlabeled break ignores switch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1065,3 +1067,13 @@ test "switch on a signed value smaller than the smallest prong value" {
else => {},
}
}
+
+test "switch on 8-bit mod result" {
+ var x: u8 = undefined;
+ x = 16;
+ switch (x % 4) {
+ 0 => {},
+ 1, 2, 3 => return error.TestFailed,
+ else => unreachable,
+ }
+}
diff --git a/test/behavior/switch_loop.zig b/test/behavior/switch_loop.zig
index 98605692be..d2e967e4c7 100644
--- a/test/behavior/switch_loop.zig
+++ b/test/behavior/switch_loop.zig
@@ -3,7 +3,7 @@ const std = @import("std");
const expect = std.testing.expect;
test "simple switch loop" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -27,7 +27,7 @@ test "simple switch loop" {
}
test "switch loop with ranges" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -48,7 +48,7 @@ test "switch loop with ranges" {
}
test "switch loop on enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -72,7 +72,7 @@ test "switch loop on enum" {
}
test "switch loop with error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -96,7 +96,7 @@ test "switch loop with error set" {
}
test "switch loop on tagged union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -129,7 +129,7 @@ test "switch loop on tagged union" {
}
test "switch loop dispatching instructions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -179,7 +179,7 @@ test "switch loop dispatching instructions" {
}
test "switch loop with pointer capture" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -218,11 +218,34 @@ test "switch loop with pointer capture" {
}
test "unanalyzed continue with operand" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
@setRuntimeSafety(false);
label: switch (false) {
false => if (false) continue :label true,
true => {},
}
}
+
+test "switch loop on larger than pointer integer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+
+ var entry: @Type(.{ .int = .{
+ .signedness = .unsigned,
+ .bits = @bitSizeOf(usize) + 1,
+ } }) = undefined;
+ entry = 0;
+ loop: switch (entry) {
+ 0 => {
+ entry += 1;
+ continue :loop 1;
+ },
+ 1 => |x| {
+ entry += 1;
+ continue :loop x + 1;
+ },
+ 2 => entry += 1,
+ else => unreachable,
+ }
+ try expect(entry == 3);
+}
diff --git a/test/behavior/switch_on_captured_error.zig b/test/behavior/switch_on_captured_error.zig
index fcf51f6c9a..9aae1c7fbe 100644
--- a/test/behavior/switch_on_captured_error.zig
+++ b/test/behavior/switch_on_captured_error.zig
@@ -300,6 +300,7 @@ test "switch on error union catch capture" {
}
test "switch on error union if else capture" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/switch_prong_err_enum.zig b/test/behavior/switch_prong_err_enum.zig
index 8d622ed4d4..6dbc76505c 100644
--- a/test/behavior/switch_prong_err_enum.zig
+++ b/test/behavior/switch_prong_err_enum.zig
@@ -21,8 +21,8 @@ fn doThing(form_id: u64) anyerror!FormValue {
}
test "switch prong returns error enum" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/switch_prong_implicit_cast.zig b/test/behavior/switch_prong_implicit_cast.zig
index 54107bb6bd..0de343898d 100644
--- a/test/behavior/switch_prong_implicit_cast.zig
+++ b/test/behavior/switch_prong_implicit_cast.zig
@@ -15,8 +15,8 @@ fn foo(id: u64) !FormValue {
}
test "switch prong implicit cast" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = switch (foo(2) catch unreachable) {
diff --git a/test/behavior/this.zig b/test/behavior/this.zig
index 23f17c49d4..c590007322 100644
--- a/test/behavior/this.zig
+++ b/test/behavior/this.zig
@@ -26,7 +26,6 @@ test "this refer to module call private fn" {
}
test "this refer to container" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var pt: Point(i32) = undefined;
@@ -47,7 +46,6 @@ fn prev(p: ?State) void {
}
test "this used as optional function parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/threadlocal.zig b/test/behavior/threadlocal.zig
index 2221550364..e60208a264 100644
--- a/test/behavior/threadlocal.zig
+++ b/test/behavior/threadlocal.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "thread local variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -25,7 +24,6 @@ test "thread local variable" {
test "pointer to thread local array" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -40,7 +38,6 @@ threadlocal var buffer: [11]u8 = undefined;
test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index eb39faa7bb..3d3c98b865 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -65,8 +65,8 @@ test "truncate on comptime integer" {
}
test "truncate on vectors" {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/try.zig b/test/behavior/try.zig
index ad376615ee..b3014ef669 100644
--- a/test/behavior/try.zig
+++ b/test/behavior/try.zig
@@ -47,7 +47,7 @@ test "try then not executed with assignment" {
}
test "`try`ing an if/else expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -68,7 +68,6 @@ test "`try`ing an if/else expression" {
}
test "'return try' of empty error set in function returning non-error" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -122,3 +121,82 @@ test "'return try' through conditional" {
comptime std.debug.assert(result == 123);
}
}
+
+test "try ptr propagation const" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
+ const S = struct {
+ fn foo0() !u32 {
+ return 0;
+ }
+
+ fn foo1() error{Bad}!u32 {
+ return 1;
+ }
+
+ fn foo2() anyerror!u32 {
+ return 2;
+ }
+
+ fn doTheTest() !void {
+ const res0: *const u32 = &(try foo0());
+ const res1: *const u32 = &(try foo1());
+ const res2: *const u32 = &(try foo2());
+ try expect(res0.* == 0);
+ try expect(res1.* == 1);
+ try expect(res2.* == 2);
+ }
+ };
+ try S.doTheTest();
+ try comptime S.doTheTest();
+}
+
+test "try ptr propagation mutate" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
+ const S = struct {
+ fn foo0() !u32 {
+ return 0;
+ }
+
+ fn foo1() error{Bad}!u32 {
+ return 1;
+ }
+
+ fn foo2() anyerror!u32 {
+ return 2;
+ }
+
+ fn doTheTest() !void {
+ var f0 = foo0();
+ var f1 = foo1();
+ var f2 = foo2();
+
+ const res0: *u32 = &(try f0);
+ const res1: *u32 = &(try f1);
+ const res2: *u32 = &(try f2);
+
+ res0.* += 1;
+ res1.* += 1;
+ res2.* += 1;
+
+ try expect(f0 catch unreachable == 1);
+ try expect(f1 catch unreachable == 2);
+ try expect(f2 catch unreachable == 3);
+
+ try expect(res0.* == 1);
+ try expect(res1.* == 2);
+ try expect(res2.* == 3);
+ }
+ };
+ try S.doTheTest();
+ try comptime S.doTheTest();
+}
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 891dd2726e..e760455e09 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -8,7 +8,6 @@ const expectEqual = std.testing.expectEqual;
test "tuple concatenation" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -51,7 +50,6 @@ test "tuple multiplication" {
}
test "more tuple concatenation" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -129,7 +127,6 @@ test "tuple initializer for var" {
}
test "array-like initializer for tuple types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -216,7 +213,6 @@ test "initializing anon struct with explicit type" {
}
test "fieldParentPtr of tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -229,7 +225,6 @@ test "fieldParentPtr of tuple" {
}
test "fieldParentPtr of anon struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -256,7 +251,6 @@ test "offsetOf anon struct" {
}
test "initializing tuple with mixed comptime-runtime fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var x: u32 = 15;
@@ -268,7 +262,6 @@ test "initializing tuple with mixed comptime-runtime fields" {
}
test "initializing anon struct with mixed comptime-runtime fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var x: u32 = 15;
@@ -280,7 +273,6 @@ test "initializing anon struct with mixed comptime-runtime fields" {
}
test "tuple in tuple passed to generic function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -300,7 +292,6 @@ test "tuple in tuple passed to generic function" {
}
test "coerce tuple to tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -315,7 +306,6 @@ test "coerce tuple to tuple" {
}
test "tuple type with void field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -353,7 +343,6 @@ test "zero sized struct in tuple handled correctly" {
}
test "tuple type with void field and a runtime field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -364,7 +353,6 @@ test "tuple type with void field and a runtime field" {
}
test "branching inside tuple literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -410,7 +398,6 @@ test "tuple of struct concatenation and coercion to array" {
}
test "nested runtime conditionals in tuple initializer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -446,7 +433,6 @@ test "sentinel slice in tuple" {
}
test "tuple pointer is indexable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -470,7 +456,6 @@ test "tuple pointer is indexable" {
}
test "coerce anon tuple to tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -496,14 +481,12 @@ test "empty tuple type" {
}
test "tuple with comptime fields with non empty initializer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const a: struct { comptime comptime_int = 0 } = .{0};
_ = a;
}
test "tuple with runtime value coerced into a slice with a sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -578,7 +561,6 @@ test "comptime fields in tuple can be initialized" {
test "empty struct in tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -591,7 +573,6 @@ test "empty struct in tuple" {
test "empty union in tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -604,6 +585,7 @@ test "empty union in tuple" {
test "field pointer of underaligned tuple" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+
const S = struct {
fn doTheTest() !void {
const T = struct { u8, u32 };
diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig
index a7dd1eee8d..643d79baa1 100644
--- a/test/behavior/tuple_declarations.zig
+++ b/test/behavior/tuple_declarations.zig
@@ -5,7 +5,6 @@ const expect = testing.expect;
const expectEqualStrings = testing.expectEqualStrings;
test "tuple declaration type info" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -34,7 +33,6 @@ test "tuple declaration type info" {
}
test "tuple declaration usage" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index a2f2afa348..58e049d896 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -200,8 +200,8 @@ test "Type.ErrorUnion" {
}
test "Type.Opaque" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -258,8 +258,8 @@ test "Type.ErrorSet" {
}
test "Type.Struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -348,7 +348,6 @@ test "Type.Struct" {
test "Type.Enum" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const Foo = @Type(.{
@@ -409,8 +408,8 @@ test "Type.Enum" {
}
test "Type.Union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const Untagged = @Type(.{
@@ -547,7 +546,6 @@ test "Type.Union from empty Type.Enum" {
test "Type.Fn" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const some_opaque = opaque {};
const some_ptr = *some_opaque;
@@ -724,7 +722,6 @@ test "@Type should resolve its children types" {
}
test "struct field names sliced at comptime from larger string" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const text =
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index aea8cbb6aa..48b10c458a 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -158,7 +158,6 @@ fn testArray() !void {
}
test "type info: error set, error union info, anyerror" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -190,7 +189,6 @@ fn testErrorSet() !void {
}
test "type info: error set single value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -204,7 +202,6 @@ test "type info: error set single value" {
}
test "type info: error set merged" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -221,7 +218,6 @@ test "type info: error set merged" {
test "type info: enum info" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -362,6 +358,8 @@ test "type info: function type info" {
}
fn testFunction() !void {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const S = struct {
export fn typeInfoFoo() callconv(.c) usize {
unreachable;
diff --git a/test/behavior/typename.zig b/test/behavior/typename.zig
index 33619a7573..2704e75f77 100644
--- a/test/behavior/typename.zig
+++ b/test/behavior/typename.zig
@@ -12,7 +12,6 @@ const expectStringStartsWith = std.testing.expectStringStartsWith;
// failures.
test "anon fn param" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -38,7 +37,6 @@ test "anon fn param" {
}
test "anon field init" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -64,7 +62,6 @@ test "anon field init" {
}
test "basic" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -86,7 +83,6 @@ test "basic" {
}
test "top level decl" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -136,7 +132,6 @@ const B = struct {
};
test "fn param" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -216,7 +211,6 @@ pub fn expectEqualStringsIgnoreDigits(expected: []const u8, actual: []const u8)
}
test "local variable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -235,7 +229,6 @@ test "local variable" {
}
test "comptime parameters not converted to anytype in function type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -245,7 +238,6 @@ test "comptime parameters not converted to anytype in function type" {
}
test "anon name strategy used in sub expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/undefined.zig b/test/behavior/undefined.zig
index 9f50301ab2..361727b914 100644
--- a/test/behavior/undefined.zig
+++ b/test/behavior/undefined.zig
@@ -46,7 +46,6 @@ fn setFooX(foo: *Foo) void {
test "assign undefined to struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -63,7 +62,6 @@ test "assign undefined to struct" {
test "assign undefined to struct with method" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -89,7 +87,6 @@ test "type name of undefined" {
var buf: []u8 = undefined;
test "reslice of undefined global var slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -100,7 +97,6 @@ test "reslice of undefined global var slice" {
}
test "returned undef is 0xaa bytes when runtime safety is enabled" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 53322697af..fb05b9edbb 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -12,8 +12,8 @@ const FooWithFloats = union {
};
test "basic unions with floats" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -29,8 +29,8 @@ fn setFloat(foo: *FooWithFloats, x: f64) void {
}
test "init union with runtime value - floats" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -60,8 +60,8 @@ const Foo = union {
};
test "init union with runtime value" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -161,7 +161,6 @@ test "unions embedded in aggregate types" {
test "constant tagged union with payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var empty = TaggedUnionWithPayload{ .Empty = {} };
@@ -210,8 +209,8 @@ const Payload = union(Letter) {
};
test "union with specified enum tag" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -221,8 +220,8 @@ test "union with specified enum tag" {
}
test "packed union generates correctly aligned type" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -264,7 +263,6 @@ fn testComparison() !void {
test "comparison between union and enum literal" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -280,7 +278,6 @@ const TheUnion = union(TheTag) {
};
test "cast union to tag type of union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testCastUnionToTag();
@@ -301,7 +298,6 @@ test "union field access gives the enum values" {
test "cast tag type of union to union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: Value2 = Letter2.B;
@@ -317,7 +313,6 @@ const Value2 = union(Letter2) {
test "implicit cast union to its tag type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: Value2 = Letter2.B;
@@ -337,8 +332,8 @@ pub const PackThis = union(enum) {
};
test "constant packed union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -357,7 +352,6 @@ const MultipleChoice = union(enum(u32)) {
};
test "simple union(enum(u32))" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x = MultipleChoice.C;
@@ -403,7 +397,6 @@ test "assigning to union with zero size field" {
test "tagged union initialization with runtime void" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(testTaggedUnionInit({}));
@@ -423,7 +416,6 @@ pub const UnionEnumNoPayloads = union(enum) { A, B };
test "tagged union with no payloads" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = UnionEnumNoPayloads{ .B = {} };
@@ -470,7 +462,6 @@ var glbl: Foo1 = undefined;
test "global union with single field is correctly initialized" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
glbl = Foo1{
@@ -487,8 +478,8 @@ pub const FooUnion = union(enum) {
var glbl_array: [2]FooUnion = undefined;
test "initialize global array of union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -500,7 +491,6 @@ test "initialize global array of union" {
test "update the tag value for zero-sized unions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = union(enum) {
@@ -515,7 +505,6 @@ test "update the tag value for zero-sized unions" {
test "union initializer generates padding only if needed" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const U = union(enum) {
@@ -528,7 +517,6 @@ test "union initializer generates padding only if needed" {
}
test "runtime tag name with single field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -543,7 +531,6 @@ test "runtime tag name with single field" {
test "method call on an empty union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -604,8 +591,8 @@ test "tagged union type" {
}
test "tagged union as return value" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -621,8 +608,8 @@ fn returnAnInt(x: i32) TaggedFoo {
}
test "tagged union with all void fields but a meaningful tag" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -649,8 +636,8 @@ test "tagged union with all void fields but a meaningful tag" {
}
test "union(enum(u32)) with specified and unspecified tag values" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -687,7 +674,6 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void {
}
test "switch on union with only 1 field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -743,7 +729,6 @@ test "union with only 1 field casted to its enum type which has enum value speci
test "@intFromEnum works on unions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Bar = union(enum) {
@@ -801,8 +786,8 @@ fn Setter(comptime attr: Attribute) type {
}
test "return union init with void payload" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -825,7 +810,7 @@ test "return union init with void payload" {
}
test "@unionInit stored to a const" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -856,7 +841,6 @@ test "@unionInit stored to a const" {
}
test "@unionInit can modify a union type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -879,7 +863,6 @@ test "@unionInit can modify a union type" {
}
test "@unionInit can modify a pointer value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -899,7 +882,6 @@ test "@unionInit can modify a pointer value" {
}
test "union no tag with struct member" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -935,8 +917,8 @@ test "extern union doesn't trigger field check at comptime" {
}
test "anonymous union literal syntax" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -964,8 +946,8 @@ test "anonymous union literal syntax" {
}
test "function call result coerces from tagged union to the tag" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1000,7 +982,6 @@ test "function call result coerces from tagged union to the tag" {
test "switching on non exhaustive union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1028,7 +1009,6 @@ test "switching on non exhaustive union" {
test "containers with single-field enums" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1057,8 +1037,8 @@ test "containers with single-field enums" {
}
test "@unionInit on union with tag but no fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1106,7 +1086,7 @@ test "union enum type gets a separate scope" {
}
test "global variable struct contains union initialized to non-most-aligned field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1133,8 +1113,8 @@ test "global variable struct contains union initialized to non-most-aligned fiel
}
test "union with no result loc initiated with a runtime value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1151,8 +1131,8 @@ test "union with no result loc initiated with a runtime value" {
}
test "union with a large struct field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1187,7 +1167,6 @@ test "comptime equality of extern unions with same tag" {
test "union tag is set when initiated as a temporary value at runtime" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1206,8 +1185,8 @@ test "union tag is set when initiated as a temporary value at runtime" {
}
test "extern union most-aligned field is smaller" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1227,7 +1206,6 @@ test "extern union most-aligned field is smaller" {
test "return an extern union from C calling convention" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1258,7 +1236,6 @@ test "return an extern union from C calling convention" {
}
test "noreturn field in union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1309,7 +1286,7 @@ test "noreturn field in union" {
}
test "@unionInit uses tag value instead of field index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1339,7 +1316,6 @@ test "@unionInit uses tag value instead of field index" {
}
test "union field ptr - zero sized payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1354,7 +1330,6 @@ test "union field ptr - zero sized payload" {
}
test "union field ptr - zero sized field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1369,7 +1344,7 @@ test "union field ptr - zero sized field" {
}
test "packed union in packed struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1420,8 +1395,8 @@ test "union int tag type is properly managed" {
}
test "no dependency loop when function pointer in union returns the union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1442,7 +1417,7 @@ test "no dependency loop when function pointer in union returns the union" {
}
test "union reassignment can use previous value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1456,7 +1431,7 @@ test "union reassignment can use previous value" {
}
test "packed union with zero-bit field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1475,7 +1450,7 @@ test "packed union with zero-bit field" {
}
test "reinterpreting enum value inside packed union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const U = packed union {
@@ -1493,8 +1468,6 @@ test "reinterpreting enum value inside packed union" {
}
test "access the tag of a global tagged union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const U = union(enum) {
a,
b: u8,
@@ -1504,7 +1477,7 @@ test "access the tag of a global tagged union" {
}
test "coerce enum literal to union in result loc" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const U = union(enum) {
a,
@@ -1522,7 +1495,6 @@ test "coerce enum literal to union in result loc" {
test "defined-layout union field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1557,7 +1529,6 @@ test "defined-layout union field pointer has correct alignment" {
test "undefined-layout union field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1590,8 +1561,8 @@ test "undefined-layout union field pointer has correct alignment" {
}
test "packed union field pointer has correct alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1624,6 +1595,7 @@ test "packed union field pointer has correct alignment" {
}
test "union with 128 bit integer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ValueTag = enum { int, other };
@@ -1647,6 +1619,7 @@ test "union with 128 bit integer" {
}
test "memset extern union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = extern union {
@@ -1668,6 +1641,7 @@ test "memset extern union" {
}
test "memset packed union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = packed union {
@@ -1768,6 +1742,7 @@ test "reinterpret extern union" {
}
test "reinterpret packed union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = packed union {
@@ -1840,6 +1815,7 @@ test "reinterpret packed union" {
}
test "reinterpret packed union inside packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1945,6 +1921,8 @@ test "extern union initialized via reintepreted struct field initializer" {
}
test "packed union initialized via reintepreted struct field initializer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
const U = packed union {
@@ -1963,6 +1941,7 @@ test "packed union initialized via reintepreted struct field initializer" {
}
test "store of comptime reinterpreted memory to extern union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@@ -1985,6 +1964,8 @@ test "store of comptime reinterpreted memory to extern union" {
}
test "store of comptime reinterpreted memory to packed union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
const U = packed union {
@@ -2005,7 +1986,6 @@ test "store of comptime reinterpreted memory to packed union" {
}
test "union field is a pointer to an aligned version of itself" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2019,6 +1999,7 @@ test "union field is a pointer to an aligned version of itself" {
}
test "pass register-sized field as non-register-sized union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2067,6 +2048,7 @@ test "circular dependency through pointer field of a union" {
}
test "pass nested union with rls" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2088,8 +2070,8 @@ test "pass nested union with rls" {
}
test "runtime union init, most-aligned field != largest" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2115,7 +2097,6 @@ test "runtime union init, most-aligned field != largest" {
test "copied union field doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2134,7 +2115,7 @@ test "copied union field doesn't alias source" {
}
test "create union(enum) from other union(enum)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2258,7 +2239,7 @@ test "matching captures causes union equivalence" {
}
test "signed enum tag with negative value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2315,6 +2296,7 @@ test "extern union @FieldType" {
}
test "assign global tagged union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = union(enum) {
diff --git a/test/behavior/union_with_members.zig b/test/behavior/union_with_members.zig
index e5e4669608..9303ac14da 100644
--- a/test/behavior/union_with_members.zig
+++ b/test/behavior/union_with_members.zig
@@ -18,7 +18,6 @@ const ET = union(enum) {
test "enum with members" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig
index b4c120effd..bd06404149 100644
--- a/test/behavior/var_args.zig
+++ b/test/behavior/var_args.zig
@@ -28,7 +28,6 @@ test "send void arg to var args" {
}
test "pass args directly" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(addSomeStuff(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
@@ -41,7 +40,6 @@ fn addSomeStuff(args: anytype) i32 {
}
test "runtime parameter before var args" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect((try extraFn(10, .{})) == 0);
@@ -94,13 +92,12 @@ fn doNothingWithFirstArg(args: anytype) void {
}
test "simple variadic function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -156,13 +153,12 @@ test "simple variadic function" {
}
test "coerce reference to var arg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -189,13 +185,13 @@ test "coerce reference to var arg" {
}
test "variadic functions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -236,12 +232,11 @@ test "variadic functions" {
}
test "copy VaList" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -271,12 +266,11 @@ test "copy VaList" {
}
test "unused VaList arg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index ee8d6711e4..d90f48d86d 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -8,7 +8,6 @@ const expectEqual = std.testing.expectEqual;
test "implicit cast vector to array - bool" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -25,8 +24,8 @@ test "implicit cast vector to array - bool" {
}
test "vector wrap operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -49,8 +48,8 @@ test "vector wrap operators" {
}
test "vector bin compares with mem.eql" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -74,8 +73,8 @@ test "vector bin compares with mem.eql" {
}
test "vector int operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -97,8 +96,8 @@ test "vector int operators" {
}
test "vector float operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -141,8 +140,8 @@ test "vector float operators" {
}
test "vector bit operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -173,7 +172,7 @@ test "vector bit operators" {
}
test "implicit cast vector to array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -193,7 +192,7 @@ test "implicit cast vector to array" {
}
test "array to vector" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -212,7 +211,7 @@ test "array to vector" {
}
test "array vector coercion - odd sizes" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@@ -251,7 +250,7 @@ test "array vector coercion - odd sizes" {
}
test "array to vector with element type coercion" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -273,7 +272,6 @@ test "array to vector with element type coercion" {
test "peer type resolution with coercible element types" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -291,8 +289,8 @@ test "peer type resolution with coercible element types" {
}
test "tuple to vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -314,8 +312,8 @@ test "tuple to vector" {
}
test "vector casts of sizes not divisible by 8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -354,7 +352,7 @@ test "vector casts of sizes not divisible by 8" {
}
test "vector @splat" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -395,7 +393,7 @@ test "vector @splat" {
}
test "load vector elements via comptime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -416,7 +414,7 @@ test "load vector elements via comptime index" {
}
test "store vector elements via comptime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -443,7 +441,6 @@ test "store vector elements via comptime index" {
}
test "load vector elements via runtime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -465,7 +462,7 @@ test "load vector elements via runtime index" {
}
test "store vector elements via runtime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -487,7 +484,6 @@ test "store vector elements via runtime index" {
}
test "initialize vector which is a struct field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -508,8 +504,8 @@ test "initialize vector which is a struct field" {
}
test "vector comparison operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -554,8 +550,8 @@ test "vector comparison operators" {
}
test "vector division operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -647,9 +643,9 @@ test "vector division operators" {
}
test "vector bitwise not operator" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -681,9 +677,9 @@ test "vector bitwise not operator" {
}
test "vector boolean not operator" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -705,8 +701,8 @@ test "vector boolean not operator" {
}
test "vector shift operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -776,8 +772,8 @@ test "vector shift operators" {
}
test "vector reduce operation" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -908,7 +904,6 @@ test "vector reduce operation" {
test "vector @reduce comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -924,7 +919,7 @@ test "vector @reduce comptime" {
}
test "mask parameter of @shuffle is comptime scope" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -946,8 +941,8 @@ test "mask parameter of @shuffle is comptime scope" {
}
test "saturating add" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -980,8 +975,8 @@ test "saturating add" {
}
test "saturating subtraction" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1004,8 +999,8 @@ test "saturating subtraction" {
}
test "saturating multiplication" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1032,8 +1027,8 @@ test "saturating multiplication" {
}
test "saturating shift-left" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1056,8 +1051,8 @@ test "saturating shift-left" {
}
test "multiplication-assignment operator with an array operand" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1077,8 +1072,8 @@ test "multiplication-assignment operator with an array operand" {
}
test "@addWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1127,8 +1122,8 @@ test "@addWithOverflow" {
}
test "@subWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1161,8 +1156,8 @@ test "@subWithOverflow" {
}
test "@mulWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1184,8 +1179,8 @@ test "@mulWithOverflow" {
}
test "@shlWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1229,8 +1224,8 @@ test "alignment of vectors" {
}
test "loading the second vector from a slice of vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1246,8 +1241,8 @@ test "loading the second vector from a slice of vectors" {
}
test "array of vectors is copied" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1270,8 +1265,8 @@ test "array of vectors is copied" {
}
test "byte vector initialized in inline function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1297,7 +1292,6 @@ test "byte vector initialized in inline function" {
test "zero divisor" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1317,7 +1311,6 @@ test "zero divisor" {
test "zero multiplicand" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1341,7 +1334,6 @@ test "zero multiplicand" {
test "@intCast to u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1364,8 +1356,8 @@ test "modRem with zero divisor" {
}
test "array operands to shuffle are coerced to vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1379,7 +1371,7 @@ test "array operands to shuffle are coerced to vectors" {
}
test "load packed vector element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -1391,7 +1383,7 @@ test "load packed vector element" {
}
test "store packed vector element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -1408,7 +1400,7 @@ test "store packed vector element" {
}
test "store to vector in slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1426,7 +1418,7 @@ test "store to vector in slice" {
}
test "store vector with memset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1459,7 +1451,7 @@ test "store vector with memset" {
}
test "addition of vectors represented as strings" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const V = @Vector(3, u8);
@@ -1469,7 +1461,7 @@ test "addition of vectors represented as strings" {
}
test "compare vectors with different element types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1482,7 +1474,6 @@ test "compare vectors with different element types" {
}
test "vector pointer is indexable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1506,7 +1497,6 @@ test "vector pointer is indexable" {
}
test "boolean vector with 2 or more booleans" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1519,7 +1509,7 @@ test "boolean vector with 2 or more booleans" {
}
test "bitcast to vector with different child type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1551,7 +1541,6 @@ test "index into comptime-known vector is comptime-known" {
test "arithmetic on zero-length vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
@@ -1568,7 +1557,6 @@ test "arithmetic on zero-length vectors" {
test "@reduce on bool vector" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const a = @Vector(2, bool){ true, true };
@@ -1578,7 +1566,7 @@ test "@reduce on bool vector" {
}
test "bitcast vector to array of smaller vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/void.zig b/test/behavior/void.zig
index b3ccc4a124..5ee8f39dcf 100644
--- a/test/behavior/void.zig
+++ b/test/behavior/void.zig
@@ -36,7 +36,6 @@ fn times(n: usize) []const void {
test "void optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: ?void = {};
diff --git a/test/behavior/while.zig b/test/behavior/while.zig
index 9095540637..7a177d5690 100644
--- a/test/behavior/while.zig
+++ b/test/behavior/while.zig
@@ -124,8 +124,6 @@ test "while copies its payload" {
}
test "continue and break" {
- if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
-
try runContinueAndBreakTest();
try expect(continue_and_break_counter == 8);
}
@@ -209,7 +207,6 @@ test "while on bool with else result follow break prong" {
test "while on optional with else result follow else prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = while (returnNull()) |value| {
@@ -220,7 +217,6 @@ test "while on optional with else result follow else prong" {
test "while on optional with else result follow break prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = while (returnOptional(10)) |value| {
@@ -292,7 +288,6 @@ test "while bool 2 break statements and an else" {
test "while optional 2 break statements and an else" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -311,7 +306,6 @@ test "while optional 2 break statements and an else" {
test "while error 2 break statements and an else" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -349,7 +343,6 @@ test "else continue outer while" {
test "try terminating an infinite loop" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
// Test coverage for https://github.com/ziglang/zig/issues/13546
@@ -376,7 +369,6 @@ test "while loop with comptime true condition needs no else block to return valu
}
test "int returned from switch in while" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 3;
@@ -389,7 +381,6 @@ test "int returned from switch in while" {
test "breaking from a loop in an if statement" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig
index 9219b3700f..62489c5cd7 100644
--- a/test/behavior/widening.zig
+++ b/test/behavior/widening.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const builtin = @import("builtin");
test "integer widening" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -29,7 +28,6 @@ test "integer widening u0 to u8" {
}
test "implicit unsigned integer to signed integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -40,7 +38,6 @@ test "implicit unsigned integer to signed integer" {
}
test "float widening" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -60,7 +57,6 @@ test "float widening" {
}
test "float widening f16 to f128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -73,7 +69,6 @@ test "float widening f16 to f128" {
}
test "cast small unsigned to larger signed" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/c_import/macros.zig b/test/c_import/macros.zig
index 9f7160af64..2efdeb4672 100644
--- a/test/c_import/macros.zig
+++ b/test/c_import/macros.zig
@@ -25,7 +25,6 @@ test "casting to void with a macro" {
test "initializer list expression" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -38,7 +37,6 @@ test "initializer list expression" {
}
test "sizeof in macros" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -55,7 +53,6 @@ test "reference to a struct type" {
test "cast negative integer to pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -64,7 +61,6 @@ test "cast negative integer to pointer" {
test "casting to union with a macro" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -80,7 +76,6 @@ test "casting to union with a macro" {
test "casting or calling a value with a paren-surrounded macro" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -99,7 +94,6 @@ test "casting or calling a value with a paren-surrounded macro" {
test "nested comma operator" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -109,7 +103,6 @@ test "nested comma operator" {
test "cast functions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -123,7 +116,6 @@ test "cast functions" {
test "large integer macro" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -132,7 +124,6 @@ test "large integer macro" {
test "string literal macro with embedded tab character" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -141,7 +132,6 @@ test "string literal macro with embedded tab character" {
test "string and char literals that are not UTF-8 encoded. Issue #12784" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -152,7 +142,6 @@ test "string and char literals that are not UTF-8 encoded. Issue #12784" {
test "Macro that uses division operator. Issue #13162" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -196,7 +185,6 @@ test "Macro that uses division operator. Issue #13162" {
test "Macro that uses remainder operator. Issue #13346" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/cases/array_in_anon_struct.zig b/test/cases/array_in_anon_struct.zig
index 5961b3f723..8c4f5ea051 100644
--- a/test/cases/array_in_anon_struct.zig
+++ b/test/cases/array_in_anon_struct.zig
@@ -19,4 +19,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/compile_errors/callconv_interrupt_on_unsupported_platform.zig b/test/cases/compile_errors/callconv_interrupt_on_unsupported_platform.zig
index 8bbc3154a1..5f42b7d9af 100644
--- a/test/cases/compile_errors/callconv_interrupt_on_unsupported_platform.zig
+++ b/test/cases/compile_errors/callconv_interrupt_on_unsupported_platform.zig
@@ -7,5 +7,5 @@ export fn entry3() callconv(.avr_interrupt) void {}
// target=aarch64-linux-none
//
// :1:30: error: calling convention 'x86_64_interrupt' only available on architectures 'x86_64'
-// :1:30: error: calling convention 'x86_interrupt' only available on architectures 'x86'
-// :1:30: error: calling convention 'avr_interrupt' only available on architectures 'avr'
+// :2:30: error: calling convention 'x86_interrupt' only available on architectures 'x86'
+// :3:30: error: calling convention 'avr_interrupt' only available on architectures 'avr'
diff --git a/test/cases/compile_errors/error_set_membership.zig b/test/cases/compile_errors/error_set_membership.zig
index 67826f4db9..a146bd39ba 100644
--- a/test/cases/compile_errors/error_set_membership.zig
+++ b/test/cases/compile_errors/error_set_membership.zig
@@ -25,7 +25,7 @@ pub fn main() Error!void {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :23:29: error: expected type 'error{InvalidCharacter}', found '@typeInfo(@typeInfo(@TypeOf(tmp.fooey)).@"fn".return_type.?).error_union.error_set'
// :23:29: note: 'error.InvalidDirection' not a member of destination error set
diff --git a/test/cases/compile_errors/function_ptr_alignment.zig b/test/cases/compile_errors/function_ptr_alignment.zig
index cf97e61f40..fd8aec06d0 100644
--- a/test/cases/compile_errors/function_ptr_alignment.zig
+++ b/test/cases/compile_errors/function_ptr_alignment.zig
@@ -10,7 +10,7 @@ comptime {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :8:41: error: expected type '*align(2) const fn () void', found '*const fn () void'
// :8:41: note: pointer alignment '1' cannot cast into pointer alignment '2'
diff --git a/test/cases/compile_errors/issue_15572_break_on_inline_while.zig b/test/cases/compile_errors/issue_15572_break_on_inline_while.zig
index f264e695c0..69d5c11eab 100644
--- a/test/cases/compile_errors/issue_15572_break_on_inline_while.zig
+++ b/test/cases/compile_errors/issue_15572_break_on_inline_while.zig
@@ -15,6 +15,6 @@ pub fn main() void {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :9:28: error: incompatible types: 'builtin.Type.EnumField' and 'void'
diff --git a/test/cases/compile_errors/switch_on_non_err_union.zig b/test/cases/compile_errors/switch_on_non_err_union.zig
index 87624b21dc..e79a181e62 100644
--- a/test/cases/compile_errors/switch_on_non_err_union.zig
+++ b/test/cases/compile_errors/switch_on_non_err_union.zig
@@ -6,6 +6,6 @@ pub fn main() void {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :2:23: error: expected error union type, found 'bool'
diff --git a/test/cases/pic_freestanding.zig b/test/cases/pic_freestanding.zig
index 86e37662e2..eda1399887 100644
--- a/test/cases/pic_freestanding.zig
+++ b/test/cases/pic_freestanding.zig
@@ -1,7 +1,7 @@
const builtin = @import("builtin");
const std = @import("std");
-fn _start() callconv(.naked) void {}
+pub fn _start() callconv(.naked) void {}
comptime {
@export(&_start, .{ .name = if (builtin.cpu.arch.isMIPS()) "__start" else "_start" });
diff --git a/test/cases/safety/@alignCast misaligned.zig b/test/cases/safety/@alignCast misaligned.zig
index e523a9d120..017c46a98d 100644
--- a/test/cases/safety/@alignCast misaligned.zig
+++ b/test/cases/safety/@alignCast misaligned.zig
@@ -22,4 +22,4 @@ fn foo(bytes: []u8) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@enumFromInt - no matching tag value.zig b/test/cases/safety/@enumFromInt - no matching tag value.zig
index 0021a4d397..7953b93358 100644
--- a/test/cases/safety/@enumFromInt - no matching tag value.zig
+++ b/test/cases/safety/@enumFromInt - no matching tag value.zig
@@ -23,4 +23,4 @@ fn baz(_: Foo) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@enumFromInt truncated bits - exhaustive.zig b/test/cases/safety/@enumFromInt truncated bits - exhaustive.zig
index 92065c4892..ace1e08d11 100644
--- a/test/cases/safety/@enumFromInt truncated bits - exhaustive.zig
+++ b/test/cases/safety/@enumFromInt truncated bits - exhaustive.zig
@@ -20,4 +20,4 @@ pub fn main() u8 {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@enumFromInt truncated bits - nonexhaustive.zig b/test/cases/safety/@enumFromInt truncated bits - nonexhaustive.zig
index 25959c9ffd..8f20081610 100644
--- a/test/cases/safety/@enumFromInt truncated bits - nonexhaustive.zig
+++ b/test/cases/safety/@enumFromInt truncated bits - nonexhaustive.zig
@@ -20,4 +20,4 @@ pub fn main() u8 {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@errorCast error not present in destination.zig b/test/cases/safety/@errorCast error not present in destination.zig
index 74e81f2a60..a121d3e6e8 100644
--- a/test/cases/safety/@errorCast error not present in destination.zig
+++ b/test/cases/safety/@errorCast error not present in destination.zig
@@ -18,4 +18,4 @@ fn foo(set1: Set1) Set2 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@errorCast error union casted to disjoint set.zig b/test/cases/safety/@errorCast error union casted to disjoint set.zig
index 2696a037c7..a84f61d8e5 100644
--- a/test/cases/safety/@errorCast error union casted to disjoint set.zig
+++ b/test/cases/safety/@errorCast error union casted to disjoint set.zig
@@ -17,4 +17,4 @@ fn foo() anyerror!i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intCast to u0.zig b/test/cases/safety/@intCast to u0.zig
index 4394f63f54..219f42f213 100644
--- a/test/cases/safety/@intCast to u0.zig
+++ b/test/cases/safety/@intCast to u0.zig
@@ -19,4 +19,4 @@ fn bar(one: u1, not_zero: i32) void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 max.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 max.zig
index 38ec595b45..70f0cebb93 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 max.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 min.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 min.zig
index 97a651855b..bc35aa6e23 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 min.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - i0 min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - signed max.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - signed max.zig
index cc19ee84ff..56e87423a1 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - signed max.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - signed max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - signed min.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - signed min.zig
index abc95e396a..61704a8733 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - signed min.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - signed min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 max.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 max.zig
index f488d0291f..361a528498 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 max.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 min.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 min.zig
index 8d459e1a5c..5706d192d5 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 min.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - u0 min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned max.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned max.zig
index 95122abc8c..842aaaa1de 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned max.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned min.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned min.zig
index fbc7cf18ac..c1e8af2f5e 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned min.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - vector max.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - vector max.zig
index 35b4c91509..96e1c5594e 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - vector max.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - vector max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - boundary case - vector min.zig b/test/cases/safety/@intFromFloat cannot fit - boundary case - vector min.zig
index 94ad509772..cf17014d3f 100644
--- a/test/cases/safety/@intFromFloat cannot fit - boundary case - vector min.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - boundary case - vector min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig b/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig
index 80edbdfd3c..23d9f87ac1 100644
--- a/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig
@@ -17,4 +17,4 @@ fn bar(a: f32) i8 {
fn baz(_: i8) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig b/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig
index ee0c040273..9d28ee0aaa 100644
--- a/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig
@@ -17,4 +17,4 @@ fn bar(a: f32) u8 {
fn baz(_: u8) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig b/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig
index c526a70047..2e76a9b253 100644
--- a/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig
+++ b/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig
@@ -17,4 +17,4 @@ fn bar(a: f32) u8 {
fn baz(_: u8) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig b/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig
index 4944a239e2..eb45f357dd 100644
--- a/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig
+++ b/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig b/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig
index a217de3073..308f97ad12 100644
--- a/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig
+++ b/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@ptrFromInt with misaligned address.zig b/test/cases/safety/@ptrFromInt with misaligned address.zig
index b95c1b320f..1383a4c3c3 100644
--- a/test/cases/safety/@ptrFromInt with misaligned address.zig
+++ b/test/cases/safety/@ptrFromInt with misaligned address.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/@tagName on corrupted enum value.zig b/test/cases/safety/@tagName on corrupted enum value.zig
index df6a3f45e0..450d0ee2e0 100644
--- a/test/cases/safety/@tagName on corrupted enum value.zig
+++ b/test/cases/safety/@tagName on corrupted enum value.zig
@@ -23,4 +23,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/@tagName on corrupted union value.zig b/test/cases/safety/@tagName on corrupted union value.zig
index 7b856e57b9..b61a72420e 100644
--- a/test/cases/safety/@tagName on corrupted union value.zig
+++ b/test/cases/safety/@tagName on corrupted union value.zig
@@ -24,4 +24,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/array slice sentinel mismatch vector.zig b/test/cases/safety/array slice sentinel mismatch vector.zig
index 55ff4b3e39..f374f1b9d5 100644
--- a/test/cases/safety/array slice sentinel mismatch vector.zig
+++ b/test/cases/safety/array slice sentinel mismatch vector.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/array slice sentinel mismatch.zig b/test/cases/safety/array slice sentinel mismatch.zig
index ab7a513b39..deb43250ec 100644
--- a/test/cases/safety/array slice sentinel mismatch.zig
+++ b/test/cases/safety/array slice sentinel mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/bad union field access.zig b/test/cases/safety/bad union field access.zig
index 14ebb1f344..a2778237c4 100644
--- a/test/cases/safety/bad union field access.zig
+++ b/test/cases/safety/bad union field access.zig
@@ -24,4 +24,4 @@ fn bar(f: *Foo) void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/calling panic.zig b/test/cases/safety/calling panic.zig
index 7b8a478be3..7ac512eadc 100644
--- a/test/cases/safety/calling panic.zig
+++ b/test/cases/safety/calling panic.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/cast []u8 to bigger slice of wrong size.zig b/test/cases/safety/cast []u8 to bigger slice of wrong size.zig
index b6b8e89bf9..65dda78751 100644
--- a/test/cases/safety/cast []u8 to bigger slice of wrong size.zig
+++ b/test/cases/safety/cast []u8 to bigger slice of wrong size.zig
@@ -18,4 +18,4 @@ fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/cast integer to global error and no code matches.zig b/test/cases/safety/cast integer to global error and no code matches.zig
index fa0474a88c..2b9cadf811 100644
--- a/test/cases/safety/cast integer to global error and no code matches.zig
+++ b/test/cases/safety/cast integer to global error and no code matches.zig
@@ -16,4 +16,4 @@ fn bar(x: u16) anyerror {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/empty slice with sentinel out of bounds.zig b/test/cases/safety/empty slice with sentinel out of bounds.zig
index 51846f894f..2d9494826d 100644
--- a/test/cases/safety/empty slice with sentinel out of bounds.zig
+++ b/test/cases/safety/empty slice with sentinel out of bounds.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/exact division failure - vectors.zig b/test/cases/safety/exact division failure - vectors.zig
index 30d5dcf11a..398ae7a4cd 100644
--- a/test/cases/safety/exact division failure - vectors.zig
+++ b/test/cases/safety/exact division failure - vectors.zig
@@ -20,4 +20,4 @@ fn divExact(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/exact division failure.zig b/test/cases/safety/exact division failure.zig
index be86853c77..0831bb4e09 100644
--- a/test/cases/safety/exact division failure.zig
+++ b/test/cases/safety/exact division failure.zig
@@ -18,4 +18,4 @@ fn divExact(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/for_len_mismatch.zig b/test/cases/safety/for_len_mismatch.zig
index 8841f11aa7..55bb7bf8b5 100644
--- a/test/cases/safety/for_len_mismatch.zig
+++ b/test/cases/safety/for_len_mismatch.zig
@@ -22,4 +22,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/for_len_mismatch_three.zig b/test/cases/safety/for_len_mismatch_three.zig
index 4efe18d3cd..b4256b0eee 100644
--- a/test/cases/safety/for_len_mismatch_three.zig
+++ b/test/cases/safety/for_len_mismatch_three.zig
@@ -21,4 +21,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/ignored expression integer overflow.zig b/test/cases/safety/ignored expression integer overflow.zig
index 1089010854..859c615e42 100644
--- a/test/cases/safety/ignored expression integer overflow.zig
+++ b/test/cases/safety/ignored expression integer overflow.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/integer addition overflow.zig b/test/cases/safety/integer addition overflow.zig
index 499e8b1015..119800c686 100644
--- a/test/cases/safety/integer addition overflow.zig
+++ b/test/cases/safety/integer addition overflow.zig
@@ -20,4 +20,4 @@ fn add(a: u16, b: u16) u16 {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/integer division by zero - vectors.zig b/test/cases/safety/integer division by zero - vectors.zig
index 63e77a0dd4..d3ddfa06c6 100644
--- a/test/cases/safety/integer division by zero - vectors.zig
+++ b/test/cases/safety/integer division by zero - vectors.zig
@@ -19,4 +19,4 @@ fn div0(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/integer division by zero.zig b/test/cases/safety/integer division by zero.zig
index e8eba5c4f0..dc12dde343 100644
--- a/test/cases/safety/integer division by zero.zig
+++ b/test/cases/safety/integer division by zero.zig
@@ -17,4 +17,4 @@ fn div0(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/integer multiplication overflow.zig b/test/cases/safety/integer multiplication overflow.zig
index f7f4148a15..4380ec6d51 100644
--- a/test/cases/safety/integer multiplication overflow.zig
+++ b/test/cases/safety/integer multiplication overflow.zig
@@ -18,4 +18,4 @@ fn mul(a: u16, b: u16) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/integer negation overflow.zig b/test/cases/safety/integer negation overflow.zig
index cfdfed0429..1c6610ae6f 100644
--- a/test/cases/safety/integer negation overflow.zig
+++ b/test/cases/safety/integer negation overflow.zig
@@ -18,4 +18,4 @@ fn neg(a: i16) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/integer subtraction overflow.zig b/test/cases/safety/integer subtraction overflow.zig
index 14e9131c3b..9211c877e4 100644
--- a/test/cases/safety/integer subtraction overflow.zig
+++ b/test/cases/safety/integer subtraction overflow.zig
@@ -18,4 +18,4 @@ fn sub(a: u16, b: u16) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/memcpy_alias.zig b/test/cases/safety/memcpy_alias.zig
index f7a1a16024..62c30ec459 100644
--- a/test/cases/safety/memcpy_alias.zig
+++ b/test/cases/safety/memcpy_alias.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/memcpy_len_mismatch.zig b/test/cases/safety/memcpy_len_mismatch.zig
index 0ef22b959c..aa9b3fd63f 100644
--- a/test/cases/safety/memcpy_len_mismatch.zig
+++ b/test/cases/safety/memcpy_len_mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/memmove_len_mismatch.zig b/test/cases/safety/memmove_len_mismatch.zig
index 881af9f336..fa22597122 100644
--- a/test/cases/safety/memmove_len_mismatch.zig
+++ b/test/cases/safety/memmove_len_mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/memset_array_undefined_bytes.zig b/test/cases/safety/memset_array_undefined_bytes.zig
index 20a65d65d6..47865a8def 100644
--- a/test/cases/safety/memset_array_undefined_bytes.zig
+++ b/test/cases/safety/memset_array_undefined_bytes.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/memset_array_undefined_large.zig b/test/cases/safety/memset_array_undefined_large.zig
index a52bfecbf0..10f57521cf 100644
--- a/test/cases/safety/memset_array_undefined_large.zig
+++ b/test/cases/safety/memset_array_undefined_large.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/memset_slice_undefined_bytes.zig b/test/cases/safety/memset_slice_undefined_bytes.zig
index fb67999306..4d76bbc414 100644
--- a/test/cases/safety/memset_slice_undefined_bytes.zig
+++ b/test/cases/safety/memset_slice_undefined_bytes.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/memset_slice_undefined_large.zig b/test/cases/safety/memset_slice_undefined_large.zig
index 166557240c..e404e35226 100644
--- a/test/cases/safety/memset_slice_undefined_large.zig
+++ b/test/cases/safety/memset_slice_undefined_large.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/modrem by zero.zig b/test/cases/safety/modrem by zero.zig
index 35b7e37e3a..fac10065ed 100644
--- a/test/cases/safety/modrem by zero.zig
+++ b/test/cases/safety/modrem by zero.zig
@@ -17,4 +17,4 @@ fn div0(a: u32, b: u32) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/modulus by zero.zig b/test/cases/safety/modulus by zero.zig
index cdeab00dbc..1c0c8ba3a9 100644
--- a/test/cases/safety/modulus by zero.zig
+++ b/test/cases/safety/modulus by zero.zig
@@ -17,4 +17,4 @@ fn mod0(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/noreturn returned.zig b/test/cases/safety/noreturn returned.zig
index c92fb08e62..b91a6def49 100644
--- a/test/cases/safety/noreturn returned.zig
+++ b/test/cases/safety/noreturn returned.zig
@@ -20,4 +20,4 @@ pub fn main() void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/optional unwrap operator on C pointer.zig b/test/cases/safety/optional unwrap operator on C pointer.zig
index 98135cfae4..4deb62bc25 100644
--- a/test/cases/safety/optional unwrap operator on C pointer.zig
+++ b/test/cases/safety/optional unwrap operator on C pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/optional unwrap operator on null pointer.zig b/test/cases/safety/optional unwrap operator on null pointer.zig
index 6ac54e6bd0..97d07626f5 100644
--- a/test/cases/safety/optional unwrap operator on null pointer.zig
+++ b/test/cases/safety/optional unwrap operator on null pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/optional_empty_error_set.zig b/test/cases/safety/optional_empty_error_set.zig
index dbe39d00c3..1ee1690d51 100644
--- a/test/cases/safety/optional_empty_error_set.zig
+++ b/test/cases/safety/optional_empty_error_set.zig
@@ -19,4 +19,4 @@ fn foo() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/out of bounds array slice by length.zig b/test/cases/safety/out of bounds array slice by length.zig
index 325749a5eb..df613b1d53 100644
--- a/test/cases/safety/out of bounds array slice by length.zig
+++ b/test/cases/safety/out of bounds array slice by length.zig
@@ -17,4 +17,4 @@ fn foo(a: u32) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/out of bounds slice access.zig b/test/cases/safety/out of bounds slice access.zig
index f4f34a203f..d5ebf6d531 100644
--- a/test/cases/safety/out of bounds slice access.zig
+++ b/test/cases/safety/out of bounds slice access.zig
@@ -18,4 +18,4 @@ fn bar(a: []const i32) i32 {
fn baz(_: i32) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/pointer casting null to non-optional pointer.zig b/test/cases/safety/pointer casting null to non-optional pointer.zig
index 33da071e73..ef61f162b4 100644
--- a/test/cases/safety/pointer casting null to non-optional pointer.zig
+++ b/test/cases/safety/pointer casting null to non-optional pointer.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/pointer casting to null function pointer.zig b/test/cases/safety/pointer casting to null function pointer.zig
index a57e71cb8f..1ce1ebc266 100644
--- a/test/cases/safety/pointer casting to null function pointer.zig
+++ b/test/cases/safety/pointer casting to null function pointer.zig
@@ -20,4 +20,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/pointer slice sentinel mismatch.zig b/test/cases/safety/pointer slice sentinel mismatch.zig
index 519b04b916..a400c0bc35 100644
--- a/test/cases/safety/pointer slice sentinel mismatch.zig
+++ b/test/cases/safety/pointer slice sentinel mismatch.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/remainder division by zero.zig b/test/cases/safety/remainder division by zero.zig
index 2d938a2fd6..3749c3d5d7 100644
--- a/test/cases/safety/remainder division by zero.zig
+++ b/test/cases/safety/remainder division by zero.zig
@@ -17,4 +17,4 @@ fn rem0(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/shift left by huge amount.zig b/test/cases/safety/shift left by huge amount.zig
index 374b03d123..b6b88ba870 100644
--- a/test/cases/safety/shift left by huge amount.zig
+++ b/test/cases/safety/shift left by huge amount.zig
@@ -19,4 +19,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/shift right by huge amount.zig b/test/cases/safety/shift right by huge amount.zig
index 173e6fcd7e..664e2b5473 100644
--- a/test/cases/safety/shift right by huge amount.zig
+++ b/test/cases/safety/shift right by huge amount.zig
@@ -19,4 +19,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/signed integer division overflow - vectors.zig b/test/cases/safety/signed integer division overflow - vectors.zig
index 0de062094d..7a696c4b4d 100644
--- a/test/cases/safety/signed integer division overflow - vectors.zig
+++ b/test/cases/safety/signed integer division overflow - vectors.zig
@@ -20,4 +20,4 @@ fn div(a: @Vector(4, i16), b: @Vector(4, i16)) @Vector(4, i16) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/signed integer division overflow.zig b/test/cases/safety/signed integer division overflow.zig
index 0d67f72649..acbb3d4e16 100644
--- a/test/cases/safety/signed integer division overflow.zig
+++ b/test/cases/safety/signed integer division overflow.zig
@@ -18,4 +18,4 @@ fn div(a: i16, b: i16) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig
index fa0eec94c0..f47083d4df 100644
--- a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig
+++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig
index 6ce662cdc7..881b3c1631 100644
--- a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig
+++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig
@@ -17,4 +17,4 @@ fn unsigned_cast(x: i32) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/signed shift left overflow.zig b/test/cases/safety/signed shift left overflow.zig
index 54a51e0ccd..0cab01ca02 100644
--- a/test/cases/safety/signed shift left overflow.zig
+++ b/test/cases/safety/signed shift left overflow.zig
@@ -18,4 +18,4 @@ fn shl(a: i16, b: u4) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/signed shift right overflow.zig b/test/cases/safety/signed shift right overflow.zig
index 1a0c5973c9..9fe3fe7873 100644
--- a/test/cases/safety/signed shift right overflow.zig
+++ b/test/cases/safety/signed shift right overflow.zig
@@ -18,4 +18,4 @@ fn shr(a: i16, b: u4) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/signed-unsigned vector cast.zig b/test/cases/safety/signed-unsigned vector cast.zig
index 919562b06c..22d4073694 100644
--- a/test/cases/safety/signed-unsigned vector cast.zig
+++ b/test/cases/safety/signed-unsigned vector cast.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/slice by length sentinel mismatch on lhs.zig b/test/cases/safety/slice by length sentinel mismatch on lhs.zig
index c66a968d4b..85785ce769 100644
--- a/test/cases/safety/slice by length sentinel mismatch on lhs.zig
+++ b/test/cases/safety/slice by length sentinel mismatch on lhs.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice by length sentinel mismatch on rhs.zig b/test/cases/safety/slice by length sentinel mismatch on rhs.zig
index a4a2189a9c..64fe818d1e 100644
--- a/test/cases/safety/slice by length sentinel mismatch on rhs.zig
+++ b/test/cases/safety/slice by length sentinel mismatch on rhs.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice sentinel mismatch - floats.zig b/test/cases/safety/slice sentinel mismatch - floats.zig
index be63272f0c..b31855ab42 100644
--- a/test/cases/safety/slice sentinel mismatch - floats.zig
+++ b/test/cases/safety/slice sentinel mismatch - floats.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/slice sentinel mismatch - optional pointers.zig b/test/cases/safety/slice sentinel mismatch - optional pointers.zig
index 38ab78b2c1..4337fe448d 100644
--- a/test/cases/safety/slice sentinel mismatch - optional pointers.zig
+++ b/test/cases/safety/slice sentinel mismatch - optional pointers.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice slice sentinel mismatch.zig b/test/cases/safety/slice slice sentinel mismatch.zig
index 51d4c16596..76224f966d 100644
--- a/test/cases/safety/slice slice sentinel mismatch.zig
+++ b/test/cases/safety/slice slice sentinel mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice start index greater than end index.zig b/test/cases/safety/slice start index greater than end index.zig
index a6dde3ac63..684020b8a7 100644
--- a/test/cases/safety/slice start index greater than end index.zig
+++ b/test/cases/safety/slice start index greater than end index.zig
@@ -21,4 +21,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice with sentinel out of bounds - runtime len.zig b/test/cases/safety/slice with sentinel out of bounds - runtime len.zig
index 7039f541e3..b9ef281144 100644
--- a/test/cases/safety/slice with sentinel out of bounds - runtime len.zig
+++ b/test/cases/safety/slice with sentinel out of bounds - runtime len.zig
@@ -20,4 +20,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice with sentinel out of bounds.zig b/test/cases/safety/slice with sentinel out of bounds.zig
index 8439e8c737..f07d393a0e 100644
--- a/test/cases/safety/slice with sentinel out of bounds.zig
+++ b/test/cases/safety/slice with sentinel out of bounds.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice_cast_change_len_0.zig b/test/cases/safety/slice_cast_change_len_0.zig
index d32bdfc920..96d94cfad5 100644
--- a/test/cases/safety/slice_cast_change_len_0.zig
+++ b/test/cases/safety/slice_cast_change_len_0.zig
@@ -24,4 +24,4 @@ const std = @import("std");
// run
// backend=stage2,llvm
-// target=x86_64-linux
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice_cast_change_len_1.zig b/test/cases/safety/slice_cast_change_len_1.zig
index 5d3728bcdc..21a1d69558 100644
--- a/test/cases/safety/slice_cast_change_len_1.zig
+++ b/test/cases/safety/slice_cast_change_len_1.zig
@@ -24,4 +24,4 @@ const std = @import("std");
// run
// backend=stage2,llvm
-// target=x86_64-linux
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slice_cast_change_len_2.zig b/test/cases/safety/slice_cast_change_len_2.zig
index 3a25d27504..5da254e903 100644
--- a/test/cases/safety/slice_cast_change_len_2.zig
+++ b/test/cases/safety/slice_cast_change_len_2.zig
@@ -24,4 +24,4 @@ const std = @import("std");
// run
// backend=stage2,llvm
-// target=x86_64-linux
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slicing null C pointer - runtime len.zig b/test/cases/safety/slicing null C pointer - runtime len.zig
index 763553b04a..831224edee 100644
--- a/test/cases/safety/slicing null C pointer - runtime len.zig
+++ b/test/cases/safety/slicing null C pointer - runtime len.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/slicing null C pointer.zig b/test/cases/safety/slicing null C pointer.zig
index a928fd585f..53da877c59 100644
--- a/test/cases/safety/slicing null C pointer.zig
+++ b/test/cases/safety/slicing null C pointer.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/switch else on corrupt enum value - one prong.zig b/test/cases/safety/switch else on corrupt enum value - one prong.zig
index 73f6ed9dc8..b2ef933080 100644
--- a/test/cases/safety/switch else on corrupt enum value - one prong.zig
+++ b/test/cases/safety/switch else on corrupt enum value - one prong.zig
@@ -21,4 +21,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/switch else on corrupt enum value - union.zig b/test/cases/safety/switch else on corrupt enum value - union.zig
index 77dacd86c6..933f7995a5 100644
--- a/test/cases/safety/switch else on corrupt enum value - union.zig
+++ b/test/cases/safety/switch else on corrupt enum value - union.zig
@@ -26,4 +26,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/switch else on corrupt enum value.zig b/test/cases/safety/switch else on corrupt enum value.zig
index 228e3c70ec..300de27e93 100644
--- a/test/cases/safety/switch else on corrupt enum value.zig
+++ b/test/cases/safety/switch else on corrupt enum value.zig
@@ -20,4 +20,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/switch on corrupted enum value.zig b/test/cases/safety/switch on corrupted enum value.zig
index 4d46d2e7a7..74ec3a4057 100644
--- a/test/cases/safety/switch on corrupted enum value.zig
+++ b/test/cases/safety/switch on corrupted enum value.zig
@@ -24,4 +24,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/switch on corrupted union value.zig b/test/cases/safety/switch on corrupted union value.zig
index 0f622dcbd8..cede4feb04 100644
--- a/test/cases/safety/switch on corrupted union value.zig
+++ b/test/cases/safety/switch on corrupted union value.zig
@@ -24,4 +24,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/truncating vector cast.zig b/test/cases/safety/truncating vector cast.zig
index 9b222e6918..f6271a094e 100644
--- a/test/cases/safety/truncating vector cast.zig
+++ b/test/cases/safety/truncating vector cast.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/unreachable.zig b/test/cases/safety/unreachable.zig
index fc1e886540..1094123cba 100644
--- a/test/cases/safety/unreachable.zig
+++ b/test/cases/safety/unreachable.zig
@@ -12,4 +12,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig
index 185cde9973..7f27c5fcd5 100644
--- a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig
+++ b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/unsigned shift left overflow.zig b/test/cases/safety/unsigned shift left overflow.zig
index e2f58f0f3b..1098a80c8e 100644
--- a/test/cases/safety/unsigned shift left overflow.zig
+++ b/test/cases/safety/unsigned shift left overflow.zig
@@ -18,4 +18,4 @@ fn shl(a: u16, b: u4) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/unsigned shift right overflow.zig b/test/cases/safety/unsigned shift right overflow.zig
index 6ded52098d..e9ad8571b6 100644
--- a/test/cases/safety/unsigned shift right overflow.zig
+++ b/test/cases/safety/unsigned shift right overflow.zig
@@ -18,4 +18,4 @@ fn shr(a: u16, b: u4) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/unsigned-signed vector cast.zig b/test/cases/safety/unsigned-signed vector cast.zig
index 6501643b36..5b3b58d928 100644
--- a/test/cases/safety/unsigned-signed vector cast.zig
+++ b/test/cases/safety/unsigned-signed vector cast.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/unwrap error switch.zig b/test/cases/safety/unwrap error switch.zig
index b3194bd2e0..a1a148cfd9 100644
--- a/test/cases/safety/unwrap error switch.zig
+++ b/test/cases/safety/unwrap error switch.zig
@@ -18,4 +18,4 @@ fn bar() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/unwrap error.zig b/test/cases/safety/unwrap error.zig
index 9fe7d437bc..dd157c8721 100644
--- a/test/cases/safety/unwrap error.zig
+++ b/test/cases/safety/unwrap error.zig
@@ -16,4 +16,4 @@ fn bar() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/value does not fit in shortening cast - u0.zig b/test/cases/safety/value does not fit in shortening cast - u0.zig
index f29df8d8af..9d77b3f1c8 100644
--- a/test/cases/safety/value does not fit in shortening cast - u0.zig
+++ b/test/cases/safety/value does not fit in shortening cast - u0.zig
@@ -18,4 +18,4 @@ fn shorten_cast(x: u8) u0 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/value does not fit in shortening cast.zig b/test/cases/safety/value does not fit in shortening cast.zig
index 415ac95dbb..9b6af39967 100644
--- a/test/cases/safety/value does not fit in shortening cast.zig
+++ b/test/cases/safety/value does not fit in shortening cast.zig
@@ -18,4 +18,4 @@ fn shorten_cast(x: i32) i8 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/safety/vector integer addition overflow.zig b/test/cases/safety/vector integer addition overflow.zig
index db08d8b241..64f6e238e0 100644
--- a/test/cases/safety/vector integer addition overflow.zig
+++ b/test/cases/safety/vector integer addition overflow.zig
@@ -19,4 +19,4 @@ fn add(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/vector integer multiplication overflow.zig b/test/cases/safety/vector integer multiplication overflow.zig
index 61176fd482..69d0dde16d 100644
--- a/test/cases/safety/vector integer multiplication overflow.zig
+++ b/test/cases/safety/vector integer multiplication overflow.zig
@@ -19,4 +19,4 @@ fn mul(a: @Vector(4, u8), b: @Vector(4, u8)) @Vector(4, u8) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/vector integer negation overflow.zig b/test/cases/safety/vector integer negation overflow.zig
index f1f36ff294..72182cfdfc 100644
--- a/test/cases/safety/vector integer negation overflow.zig
+++ b/test/cases/safety/vector integer negation overflow.zig
@@ -19,4 +19,4 @@ fn neg(a: @Vector(4, i16)) @Vector(4, i16) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/vector integer subtraction overflow.zig b/test/cases/safety/vector integer subtraction overflow.zig
index 9ba942469c..8ac2c6d756 100644
--- a/test/cases/safety/vector integer subtraction overflow.zig
+++ b/test/cases/safety/vector integer subtraction overflow.zig
@@ -19,4 +19,4 @@ fn sub(a: @Vector(4, u32), b: @Vector(4, u32)) @Vector(4, u32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/cases/safety/zero casted to error.zig b/test/cases/safety/zero casted to error.zig
index 1ffa995260..7a02ec2b71 100644
--- a/test/cases/safety/zero casted to error.zig
+++ b/test/cases/safety/zero casted to error.zig
@@ -16,4 +16,4 @@ fn bar(x: u16) anyerror {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
diff --git a/test/cases/taking_pointer_of_global_tagged_union.zig b/test/cases/taking_pointer_of_global_tagged_union.zig
index accb22667d..adc71d81c0 100644
--- a/test/cases/taking_pointer_of_global_tagged_union.zig
+++ b/test/cases/taking_pointer_of_global_tagged_union.zig
@@ -23,4 +23,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
diff --git a/test/incremental/fix_many_errors b/test/incremental/fix_many_errors
deleted file mode 100644
index 1d9446022c..0000000000
--- a/test/incremental/fix_many_errors
+++ /dev/null
@@ -1,71 +0,0 @@
-#target=x86_64-linux-selfhosted
-#target=x86_64-linux-cbe
-#target=x86_64-windows-cbe
-#update=initial version
-#file=main.zig
-pub fn main() !void {}
-comptime { @compileError("c0"); }
-comptime { @compileError("c1"); }
-comptime { @compileError("c2"); }
-comptime { @compileError("c3"); }
-comptime { @compileError("c4"); }
-comptime { @compileError("c5"); }
-comptime { @compileError("c6"); }
-comptime { @compileError("c7"); }
-comptime { @compileError("c8"); }
-comptime { @compileError("c9"); }
-export fn f0() void { @compileError("f0"); }
-export fn f1() void { @compileError("f1"); }
-export fn f2() void { @compileError("f2"); }
-export fn f3() void { @compileError("f3"); }
-export fn f4() void { @compileError("f4"); }
-export fn f5() void { @compileError("f5"); }
-export fn f6() void { @compileError("f6"); }
-export fn f7() void { @compileError("f7"); }
-export fn f8() void { @compileError("f8"); }
-export fn f9() void { @compileError("f9"); }
-#expect_error=main.zig:2:12: error: c0
-#expect_error=main.zig:3:12: error: c1
-#expect_error=main.zig:4:12: error: c2
-#expect_error=main.zig:5:12: error: c3
-#expect_error=main.zig:6:12: error: c4
-#expect_error=main.zig:7:12: error: c5
-#expect_error=main.zig:8:12: error: c6
-#expect_error=main.zig:9:12: error: c7
-#expect_error=main.zig:10:12: error: c8
-#expect_error=main.zig:11:12: error: c9
-#expect_error=main.zig:12:23: error: f0
-#expect_error=main.zig:13:23: error: f1
-#expect_error=main.zig:14:23: error: f2
-#expect_error=main.zig:15:23: error: f3
-#expect_error=main.zig:16:23: error: f4
-#expect_error=main.zig:17:23: error: f5
-#expect_error=main.zig:18:23: error: f6
-#expect_error=main.zig:19:23: error: f7
-#expect_error=main.zig:20:23: error: f8
-#expect_error=main.zig:21:23: error: f9
-#update=fix all the errors
-#file=main.zig
-pub fn main() !void {}
-comptime {}
-comptime {}
-comptime {}
-comptime {}
-comptime {}
-comptime {}
-comptime {}
-comptime {}
-comptime {}
-comptime {}
-export fn f0() void {}
-export fn f1() void {}
-export fn f2() void {}
-export fn f3() void {}
-export fn f4() void {}
-export fn f5() void {}
-export fn f6() void {}
-export fn f7() void {}
-export fn f8() void {}
-export fn f9() void {}
-const std = @import("std");
-#expect_stdout=""
diff --git a/test/link/build.zig.zon b/test/link/build.zig.zon
index 16bba08c4e..ab44726091 100644
--- a/test/link/build.zig.zon
+++ b/test/link/build.zig.zon
@@ -1,5 +1,6 @@
.{
- .name = "link_test_cases",
+ .name = .link_test_cases,
+ .fingerprint = 0x404f657576fec9f2,
.version = "0.0.0",
.dependencies = .{
.bss = .{
diff --git a/test/link/elf.zig b/test/link/elf.zig
index f6dfbbea86..1d6de32f0d 100644
--- a/test/link/elf.zig
+++ b/test/link/elf.zig
@@ -210,8 +210,8 @@ fn testAbsSymbols(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.addObject(obj);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -235,7 +235,7 @@ fn testAsNeeded(b: *Build, opts: Options) *Step {
\\
,
});
- main_o.linkLibC();
+ main_o.root_module.link_libc = true;
const libfoo = addSharedLibrary(b, opts, .{ .name = "foo" });
addCSourceBytes(libfoo, "int foo() { return 42; }", &.{});
@@ -253,17 +253,17 @@ fn testAsNeeded(b: *Build, opts: Options) *Step {
const exe = addExecutable(b, opts, .{
.name = "test",
});
- exe.addObject(main_o);
- exe.linkSystemLibrary2("foo", .{ .needed = true });
- exe.addLibraryPath(libfoo.getEmittedBinDirectory());
- exe.addRPath(libfoo.getEmittedBinDirectory());
- exe.linkSystemLibrary2("bar", .{ .needed = true });
- exe.addLibraryPath(libbar.getEmittedBinDirectory());
- exe.addRPath(libbar.getEmittedBinDirectory());
- exe.linkSystemLibrary2("baz", .{ .needed = true });
- exe.addLibraryPath(libbaz.getEmittedBinDirectory());
- exe.addRPath(libbaz.getEmittedBinDirectory());
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkSystemLibrary("foo", .{ .needed = true });
+ exe.root_module.addLibraryPath(libfoo.getEmittedBinDirectory());
+ exe.root_module.addRPath(libfoo.getEmittedBinDirectory());
+ exe.root_module.linkSystemLibrary("bar", .{ .needed = true });
+ exe.root_module.addLibraryPath(libbar.getEmittedBinDirectory());
+ exe.root_module.addRPath(libbar.getEmittedBinDirectory());
+ exe.root_module.linkSystemLibrary("baz", .{ .needed = true });
+ exe.root_module.addLibraryPath(libbaz.getEmittedBinDirectory());
+ exe.root_module.addRPath(libbaz.getEmittedBinDirectory());
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("42\n");
@@ -281,17 +281,17 @@ fn testAsNeeded(b: *Build, opts: Options) *Step {
const exe = addExecutable(b, opts, .{
.name = "test",
});
- exe.addObject(main_o);
- exe.linkSystemLibrary2("foo", .{ .needed = false });
- exe.addLibraryPath(libfoo.getEmittedBinDirectory());
- exe.addRPath(libfoo.getEmittedBinDirectory());
- exe.linkSystemLibrary2("bar", .{ .needed = false });
- exe.addLibraryPath(libbar.getEmittedBinDirectory());
- exe.addRPath(libbar.getEmittedBinDirectory());
- exe.linkSystemLibrary2("baz", .{ .needed = false });
- exe.addLibraryPath(libbaz.getEmittedBinDirectory());
- exe.addRPath(libbaz.getEmittedBinDirectory());
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkSystemLibrary("foo", .{ .needed = false });
+ exe.root_module.addLibraryPath(libfoo.getEmittedBinDirectory());
+ exe.root_module.addRPath(libfoo.getEmittedBinDirectory());
+ exe.root_module.linkSystemLibrary("bar", .{ .needed = false });
+ exe.root_module.addLibraryPath(libbar.getEmittedBinDirectory());
+ exe.root_module.addRPath(libbar.getEmittedBinDirectory());
+ exe.root_module.linkSystemLibrary("baz", .{ .needed = false });
+ exe.root_module.addLibraryPath(libbaz.getEmittedBinDirectory());
+ exe.root_module.addRPath(libbaz.getEmittedBinDirectory());
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("42\n");
@@ -351,15 +351,15 @@ fn testCanonicalPlt(b: *Build, opts: Options) *Step {
,
.pic = false,
});
- main_o.linkLibC();
+ main_o.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{
.name = "main",
});
- exe.addObject(main_o);
- exe.addObject(b_o);
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
exe.pie = false;
const run = addRunArtifact(exe);
@@ -384,7 +384,7 @@ fn testComdatElimination(b: *Build, opts: Options) *Step {
\\}
,
});
- a_o.linkLibCpp();
+ a_o.root_module.link_libcpp = true;
const main_o = addObject(b, opts, .{
.name = "main",
@@ -401,13 +401,13 @@ fn testComdatElimination(b: *Build, opts: Options) *Step {
\\}
,
});
- main_o.linkLibCpp();
+ main_o.root_module.link_libcpp = true;
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(a_o);
- exe.addObject(main_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(main_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(
@@ -420,9 +420,9 @@ fn testComdatElimination(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
- exe.addObject(a_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(
@@ -435,12 +435,12 @@ fn testComdatElimination(b: *Build, opts: Options) *Step {
{
const c_o = addObject(b, opts, .{ .name = "c" });
- c_o.addObject(main_o);
- c_o.addObject(a_o);
+ c_o.root_module.addObject(main_o);
+ c_o.root_module.addObject(a_o);
const exe = addExecutable(b, opts, .{ .name = "main3" });
- exe.addObject(c_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(c_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(
@@ -453,12 +453,12 @@ fn testComdatElimination(b: *Build, opts: Options) *Step {
{
const d_o = addObject(b, opts, .{ .name = "d" });
- d_o.addObject(a_o);
- d_o.addObject(main_o);
+ d_o.root_module.addObject(a_o);
+ d_o.root_module.addObject(main_o);
const exe = addExecutable(b, opts, .{ .name = "main4" });
- exe.addObject(d_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(d_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(
@@ -522,7 +522,7 @@ fn testCommonSymbols(b: *Build, opts: Options) *Step {
\\ printf("%d %d %d\n", foo, bar, baz);
\\}
, &.{"-fcommon"});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("0 5 42\n");
@@ -549,7 +549,7 @@ fn testCommonSymbolsInArchive(b: *Build, opts: Options) *Step {
,
.c_source_flags = &.{"-fcommon"},
});
- a_o.linkLibC();
+ a_o.root_module.link_libc = true;
const b_o = addObject(b, opts, .{
.name = "b",
@@ -575,16 +575,16 @@ fn testCommonSymbolsInArchive(b: *Build, opts: Options) *Step {
});
const lib = addStaticLibrary(b, opts, .{ .name = "lib" });
- lib.addObject(b_o);
- lib.addObject(c_o);
- lib.addObject(d_o);
+ lib.root_module.addObject(b_o);
+ lib.root_module.addObject(c_o);
+ lib.root_module.addObject(d_o);
const exe = addExecutable(b, opts, .{
.name = "test",
});
- exe.addObject(a_o);
- exe.linkLibrary(lib);
- exe.linkLibC();
+ exe.root_module.addObject(a_o);
+ exe.root_module.linkLibrary(lib);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("5 0 0 -1\n");
@@ -603,15 +603,15 @@ fn testCommonSymbolsInArchive(b: *Build, opts: Options) *Step {
});
const lib = addStaticLibrary(b, opts, .{ .name = "lib" });
- lib.addObject(b_o);
- lib.addObject(e_o);
+ lib.root_module.addObject(b_o);
+ lib.root_module.addObject(e_o);
const exe = addExecutable(b, opts, .{
.name = "test",
});
- exe.addObject(a_o);
- exe.linkLibrary(lib);
- exe.linkLibC();
+ exe.root_module.addObject(a_o);
+ exe.root_module.linkLibrary(lib);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("5 0 7 2\n");
@@ -641,8 +641,8 @@ fn testCopyrel(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("3 5\n");
@@ -679,8 +679,8 @@ fn testCopyrelAlias(b: *Build, opts: Options) *Step {
\\extern int bar;
\\int *get_bar() { return &bar; }
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
exe.pie = false;
const run = addRunArtifact(exe);
@@ -712,15 +712,15 @@ fn testCopyrelAlignment(b: *Build, opts: Options) *Step {
,
.pic = false,
});
- obj.linkLibC();
+ obj.root_module.link_libc = true;
const exp_stdout = "5\n";
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(obj);
- exe.linkLibrary(a_so);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.linkLibrary(a_so);
+ exe.root_module.link_libc = true;
exe.pie = false;
const run = addRunArtifact(exe);
@@ -737,9 +737,9 @@ fn testCopyrelAlignment(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(obj);
- exe.linkLibrary(b_so);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.linkLibrary(b_so);
+ exe.root_module.link_libc = true;
exe.pie = false;
const run = addRunArtifact(exe);
@@ -756,9 +756,9 @@ fn testCopyrelAlignment(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(obj);
- exe.linkLibrary(c_so);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.linkLibrary(c_so);
+ exe.root_module.link_libc = true;
exe.pie = false;
const run = addRunArtifact(exe);
@@ -793,7 +793,7 @@ fn testDsoPlt(b: *Build, opts: Options) *Step {
\\ real_hello();
\\}
, &.{});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{ .name = "test" });
addCSourceBytes(exe,
@@ -806,8 +806,8 @@ fn testDsoPlt(b: *Build, opts: Options) *Step {
\\ hello();
\\}
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello WORLD\n");
@@ -825,7 +825,7 @@ fn testDsoUndef(b: *Build, opts: Options) *Step {
\\int bar = 5;
\\int baz() { return foo; }
, &.{});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const obj = addObject(b, opts, .{
.name = "obj",
@@ -833,18 +833,18 @@ fn testDsoUndef(b: *Build, opts: Options) *Step {
});
const lib = addStaticLibrary(b, opts, .{ .name = "lib" });
- lib.addObject(obj);
+ lib.root_module.addObject(obj);
const exe = addExecutable(b, opts, .{ .name = "test" });
- exe.linkLibrary(dso);
- exe.linkLibrary(lib);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.linkLibrary(lib);
addCSourceBytes(exe,
\\extern int bar;
\\int main() {
\\ return bar - 5;
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -871,7 +871,7 @@ fn testEmitRelocatable(b: *Build, opts: Options) *Step {
\\ std.debug.print("foo={d}\n", .{foo()});
\\}
});
- a_o.linkLibC();
+ a_o.root_module.link_libc = true;
const b_o = addObject(b, opts, .{ .name = "b", .c_source_bytes =
\\#include <stdio.h>
@@ -880,11 +880,11 @@ fn testEmitRelocatable(b: *Build, opts: Options) *Step {
\\ fprintf(stderr, "bar=%d\n", bar);
\\}
});
- b_o.linkLibC();
+ b_o.root_module.link_libc = true;
const c_o = addObject(b, opts, .{ .name = "c" });
- c_o.addObject(a_o);
- c_o.addObject(b_o);
+ c_o.root_module.addObject(a_o);
+ c_o.root_module.addObject(b_o);
const exe = addExecutable(b, opts, .{ .name = "test", .zig_source_bytes =
\\const std = @import("std");
@@ -895,8 +895,8 @@ fn testEmitRelocatable(b: *Build, opts: Options) *Step {
\\ printBar();
\\}
});
- exe.addObject(c_o);
- exe.linkLibC();
+ exe.root_module.addObject(c_o);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdErrEqual(
@@ -944,9 +944,9 @@ fn testEmitStaticLib(b: *Build, opts: Options) *Step {
});
const lib = addStaticLibrary(b, opts, .{ .name = "lib" });
- lib.addObject(obj1);
- lib.addObject(obj2);
- lib.addObject(obj3);
+ lib.root_module.addObject(obj1);
+ lib.root_module.addObject(obj2);
+ lib.root_module.addObject(obj3);
const check = lib.checkObject();
check.checkInArchiveSymtab();
@@ -996,7 +996,7 @@ fn testEmitStaticLibZig(b: *Build, opts: Options) *Step {
\\}
,
});
- lib.addObject(obj1);
+ lib.root_module.addObject(obj1);
const exe = addExecutable(b, opts, .{
.name = "test",
@@ -1008,7 +1008,7 @@ fn testEmitStaticLibZig(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.linkLibrary(lib);
+ exe.root_module.linkLibrary(lib);
const run = addRunArtifact(exe);
run.expectStdErrEqual("44");
@@ -1023,7 +1023,7 @@ fn testEmptyObject(b: *Build, opts: Options) *Step {
const exe = addExecutable(b, opts, .{ .name = "test" });
addCSourceBytes(exe, "int main() { return 0; }", &.{});
addCSourceBytes(exe, "", &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -1052,8 +1052,8 @@ fn testEntryPoint(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(a_o);
- exe.addObject(b_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
exe.entry = .{ .symbol_name = "foo" };
const check = exe.checkObject();
@@ -1068,8 +1068,8 @@ fn testEntryPoint(b: *Build, opts: Options) *Step {
// cause an artifact collision taking the cached executable from the above
// step instead of generating a new one.
const exe = addExecutable(b, opts, .{ .name = "other" });
- exe.addObject(a_o);
- exe.addObject(b_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
exe.entry = .{ .symbol_name = "bar" };
const check = exe.checkObject();
@@ -1113,8 +1113,8 @@ fn testExportDynamic(b: *Build, opts: Options) *Step {
\\ return baz;
\\}
, &.{});
- exe.addObject(obj);
- exe.linkLibrary(dso);
+ exe.root_module.addObject(obj);
+ exe.root_module.linkLibrary(dso);
exe.rdynamic = true;
const check = exe.checkObject();
@@ -1152,8 +1152,8 @@ fn testExportSymbolsFromExe(b: *Build, opts: Options) *Step {
\\ foo();
\\}
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInDynamicSymtab();
@@ -1181,7 +1181,7 @@ fn testFuncAddress(b: *Build, opts: Options) *Step {
\\ assert(fn == ptr);
\\}
, &.{});
- exe.linkLibrary(dso);
+ exe.root_module.linkLibrary(dso);
exe.root_module.pic = false;
exe.pie = false;
@@ -1216,15 +1216,15 @@ fn testGcSections(b: *Build, opts: Options) *Step {
});
obj.link_function_sections = true;
obj.link_data_sections = true;
- obj.linkLibC();
- obj.linkLibCpp();
+ obj.root_module.link_libc = true;
+ obj.root_module.link_libcpp = true;
{
const exe = addExecutable(b, opts, .{ .name = "test" });
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.link_gc_sections = false;
- exe.linkLibC();
- exe.linkLibCpp();
+ exe.root_module.link_libc = true;
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("1 2\n");
@@ -1252,10 +1252,10 @@ fn testGcSections(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "test" });
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.link_gc_sections = true;
- exe.linkLibC();
- exe.linkLibCpp();
+ exe.root_module.link_libc = true;
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("1 2\n");
@@ -1321,7 +1321,7 @@ fn testGcSectionsZig(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.link_gc_sections = false;
const run = addRunArtifact(exe);
@@ -1363,7 +1363,7 @@ fn testGcSectionsZig(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.link_gc_sections = true;
const run = addRunArtifact(exe);
@@ -1427,7 +1427,7 @@ fn testIFuncAlias(b: *Build, opts: Options) *Step {
\\}
, &.{});
exe.root_module.pic = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -1467,9 +1467,9 @@ fn testIFuncDlopen(b: *Build, opts: Options) *Step {
\\ assert(foo == p);
\\}
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
- exe.linkSystemLibrary2("dl", .{});
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
+ exe.root_module.linkSystemLibrary("dl", .{});
exe.root_module.pic = false;
exe.pie = false;
@@ -1498,7 +1498,7 @@ fn testIFuncDso(b: *Build, opts: Options) *Step {
\\}
,
});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{
.name = "main",
@@ -1509,7 +1509,7 @@ fn testIFuncDso(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.linkLibrary(dso);
+ exe.root_module.linkLibrary(dso);
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello world\n");
@@ -1540,7 +1540,7 @@ fn testIFuncDynamic(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe, main_c, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
exe.link_z_lazy = true;
const run = addRunArtifact(exe);
@@ -1550,7 +1550,7 @@ fn testIFuncDynamic(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "other" });
addCSourceBytes(exe, main_c, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello world\n");
@@ -1576,7 +1576,7 @@ fn testIFuncExport(b: *Build, opts: Options) *Step {
\\ return real_foobar;
\\}
, &.{});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const check = dso.checkObject();
check.checkInDynamicSymtab();
@@ -1613,7 +1613,7 @@ fn testIFuncFuncPtr(b: *Build, opts: Options) *Step {
\\}
, &.{});
exe.root_module.pic = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("3\n");
@@ -1642,7 +1642,7 @@ fn testIFuncNoPlt(b: *Build, opts: Options) *Step {
\\}
, &.{"-fno-plt"});
exe.root_module.pic = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello world\n");
@@ -1669,7 +1669,7 @@ fn testIFuncStatic(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
exe.linkage = .static;
const run = addRunArtifact(exe);
@@ -1700,7 +1700,7 @@ fn testIFuncStaticPie(b: *Build, opts: Options) *Step {
exe.linkage = .static;
exe.root_module.pic = true;
exe.pie = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello world\n");
@@ -1733,7 +1733,7 @@ fn testImageBase(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
exe.image_base = 0x8000000;
const run = addRunArtifact(exe);
@@ -1779,7 +1779,7 @@ fn testImportingDataDynamic(b: *Build, opts: Options) *Step {
\\void printFoo() { fprintf(stderr, "lib foo=%d\n", foo); }
,
});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const main = addExecutable(b, opts, .{
.name = "main",
@@ -1798,7 +1798,7 @@ fn testImportingDataDynamic(b: *Build, opts: Options) *Step {
.strip = true, // TODO temp hack
});
main.pie = true;
- main.linkLibrary(dso);
+ main.root_module.linkLibrary(dso);
const run = addRunArtifact(main);
run.expectStdErrEqual(
@@ -1832,7 +1832,7 @@ fn testImportingDataStatic(b: *Build, opts: Options) *Step {
}, .{
.name = "a",
});
- lib.addObject(obj);
+ lib.root_module.addObject(obj);
const main = addExecutable(b, opts, .{
.name = "main",
@@ -1844,8 +1844,8 @@ fn testImportingDataStatic(b: *Build, opts: Options) *Step {
,
.strip = true, // TODO temp hack
});
- main.linkLibrary(lib);
- main.linkLibC();
+ main.root_module.linkLibrary(lib);
+ main.root_module.link_libc = true;
const run = addRunArtifact(main);
run.expectStdErrEqual("42\n");
@@ -1864,7 +1864,7 @@ fn testInitArrayOrder(b: *Build, opts: Options) *Step {
\\__attribute__((constructor(10000))) void init4() { printf("1"); }
,
});
- a_o.linkLibC();
+ a_o.root_module.link_libc = true;
const b_o = addObject(b, opts, .{
.name = "b",
@@ -1873,7 +1873,7 @@ fn testInitArrayOrder(b: *Build, opts: Options) *Step {
\\__attribute__((constructor(1000))) void init3() { printf("2"); }
,
});
- b_o.linkLibC();
+ b_o.root_module.link_libc = true;
const c_o = addObject(b, opts, .{
.name = "c",
@@ -1882,7 +1882,7 @@ fn testInitArrayOrder(b: *Build, opts: Options) *Step {
\\__attribute__((constructor)) void init1() { printf("3"); }
,
});
- c_o.linkLibC();
+ c_o.root_module.link_libc = true;
const d_o = addObject(b, opts, .{
.name = "d",
@@ -1891,7 +1891,7 @@ fn testInitArrayOrder(b: *Build, opts: Options) *Step {
\\__attribute__((constructor)) void init2() { printf("4"); }
,
});
- d_o.linkLibC();
+ d_o.root_module.link_libc = true;
const e_o = addObject(b, opts, .{
.name = "e",
@@ -1900,7 +1900,7 @@ fn testInitArrayOrder(b: *Build, opts: Options) *Step {
\\__attribute__((destructor(10000))) void fini4() { printf("5"); }
,
});
- e_o.linkLibC();
+ e_o.root_module.link_libc = true;
const f_o = addObject(b, opts, .{
.name = "f",
@@ -1909,7 +1909,7 @@ fn testInitArrayOrder(b: *Build, opts: Options) *Step {
\\__attribute__((destructor(1000))) void fini3() { printf("6"); }
,
});
- f_o.linkLibC();
+ f_o.root_module.link_libc = true;
const g_o = addObject(b, opts, .{
.name = "g",
@@ -1918,24 +1918,24 @@ fn testInitArrayOrder(b: *Build, opts: Options) *Step {
\\__attribute__((destructor)) void fini1() { printf("7"); }
,
});
- g_o.linkLibC();
+ g_o.root_module.link_libc = true;
const h_o = addObject(b, opts, .{ .name = "h", .c_source_bytes =
\\#include <stdio.h>
\\__attribute__((destructor)) void fini2() { printf("8"); }
});
- h_o.linkLibC();
+ h_o.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe, "int main() { return 0; }", &.{});
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(c_o);
- exe.addObject(d_o);
- exe.addObject(e_o);
- exe.addObject(f_o);
- exe.addObject(g_o);
- exe.addObject(h_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(c_o);
+ exe.root_module.addObject(d_o);
+ exe.root_module.addObject(e_o);
+ exe.root_module.addObject(f_o);
+ exe.root_module.addObject(g_o);
+ exe.root_module.addObject(h_o);
if (opts.target.result.isGnuLibC()) {
// TODO I think we need to clarify our use of `-fPIC -fPIE` flags for different targets
@@ -1970,7 +1970,7 @@ fn testLargeAlignmentDso(b: *Build, opts: Options) *Step {
\\}
, &.{});
dso.link_function_sections = true;
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const check = dso.checkObject();
check.checkInSymtab();
@@ -1986,8 +1986,8 @@ fn testLargeAlignmentDso(b: *Build, opts: Options) *Step {
\\void greet();
\\int main() { greet(); }
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello world");
@@ -2021,7 +2021,7 @@ fn testLargeAlignmentExe(b: *Build, opts: Options) *Step {
\\}
, &.{});
exe.link_function_sections = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInSymtab();
@@ -2049,7 +2049,7 @@ fn testLargeBss(b: *Build, opts: Options) *Step {
\\ return arr[2000];
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
// Disabled to work around the ELF linker crashing.
// Can be reproduced on a x86_64-linux host by commenting out the line below.
exe.root_module.sanitize_c = .off;
@@ -2071,10 +2071,10 @@ fn testLinkOrder(b: *Build, opts: Options) *Step {
});
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(obj);
+ dso.root_module.addObject(obj);
const lib = addStaticLibrary(b, opts, .{ .name = "b" });
- lib.addObject(obj);
+ lib.root_module.addObject(obj);
const main_o = addObject(b, opts, .{
.name = "main",
@@ -2089,14 +2089,14 @@ fn testLinkOrder(b: *Build, opts: Options) *Step {
// https://github.com/ziglang/zig/issues/17450
// {
// const exe = addExecutable(b, opts, .{ .name = "main1"});
- // exe.addObject(main_o);
- // exe.linkSystemLibrary2("a", .{});
- // exe.addLibraryPath(dso.getEmittedBinDirectory());
- // exe.addRPath(dso.getEmittedBinDirectory());
- // exe.linkSystemLibrary2("b", .{});
- // exe.addLibraryPath(lib.getEmittedBinDirectory());
- // exe.addRPath(lib.getEmittedBinDirectory());
- // exe.linkLibC();
+ // exe.root_module.addObject(main_o);
+ // exe.root_module.linkSystemLibrary("a", .{});
+ // exe.root_module.addLibraryPath(dso.getEmittedBinDirectory());
+ // exe.root_module.addRPath(dso.getEmittedBinDirectory());
+ // exe.root_module.linkSystemLibrary("b", .{});
+ // exe.root_module.addLibraryPath(lib.getEmittedBinDirectory());
+ // exe.root_module.addRPath(lib.getEmittedBinDirectory());
+ // exe.root_module.link_libc = true;
// const check = exe.checkObject();
// check.checkInDynamicSection();
@@ -2106,14 +2106,14 @@ fn testLinkOrder(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
- exe.linkSystemLibrary2("b", .{});
- exe.addLibraryPath(lib.getEmittedBinDirectory());
- exe.addRPath(lib.getEmittedBinDirectory());
- exe.linkSystemLibrary2("a", .{});
- exe.addLibraryPath(dso.getEmittedBinDirectory());
- exe.addRPath(dso.getEmittedBinDirectory());
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkSystemLibrary("b", .{});
+ exe.root_module.addLibraryPath(lib.getEmittedBinDirectory());
+ exe.root_module.addRPath(lib.getEmittedBinDirectory());
+ exe.root_module.linkSystemLibrary("a", .{});
+ exe.root_module.addLibraryPath(dso.getEmittedBinDirectory());
+ exe.root_module.addRPath(dso.getEmittedBinDirectory());
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInDynamicSection();
@@ -2149,14 +2149,14 @@ fn testLdScript(b: *Build, opts: Options) *Step {
\\ return bar() - baz();
\\}
, &.{});
- exe.linkSystemLibrary2("a", .{});
- exe.addLibraryPath(scripts.getDirectory());
- exe.addLibraryPath(scripts2.getDirectory());
- exe.addLibraryPath(bar.getEmittedBinDirectory());
- exe.addLibraryPath(baz.getEmittedBinDirectory());
- exe.addRPath(bar.getEmittedBinDirectory());
- exe.addRPath(baz.getEmittedBinDirectory());
- exe.linkLibC();
+ exe.root_module.linkSystemLibrary("a", .{});
+ exe.root_module.addLibraryPath(scripts.getDirectory());
+ exe.root_module.addLibraryPath(scripts2.getDirectory());
+ exe.root_module.addLibraryPath(bar.getEmittedBinDirectory());
+ exe.root_module.addLibraryPath(baz.getEmittedBinDirectory());
+ exe.root_module.addRPath(bar.getEmittedBinDirectory());
+ exe.root_module.addRPath(baz.getEmittedBinDirectory());
+ exe.root_module.link_libc = true;
exe.allow_so_scripts = true;
const run = addRunArtifact(exe);
@@ -2174,9 +2174,9 @@ fn testLdScriptPathError(b: *Build, opts: Options) *Step {
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe, "int main() { return 0; }", &.{});
- exe.linkSystemLibrary2("a", .{});
- exe.addLibraryPath(scripts.getDirectory());
- exe.linkLibC();
+ exe.root_module.linkSystemLibrary("a", .{});
+ exe.root_module.addLibraryPath(scripts.getDirectory());
+ exe.root_module.link_libc = true;
exe.allow_so_scripts = true;
// TODO: A future enhancement could make this error message also mention
@@ -2213,8 +2213,8 @@ fn testLdScriptAllowUndefinedVersion(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.linkLibrary(so);
- exe.linkLibC();
+ exe.root_module.linkLibrary(so);
+ exe.root_module.link_libc = true;
exe.allow_so_scripts = true;
const run = addRunArtifact(exe);
@@ -2269,8 +2269,8 @@ fn testMismatchedCpuArchitectureError(b: *Build, opts: Options) *Step {
\\ return foo;
\\}
, &.{});
- exe.addObject(obj);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.link_libc = true;
expectLinkErrors(exe, test_step, .{ .exact = &.{
"invalid ELF machine type: AARCH64",
@@ -2291,7 +2291,7 @@ fn testLinkingC(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello World!\n");
@@ -2320,8 +2320,8 @@ fn testLinkingCpp(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibC();
- exe.linkLibCpp();
+ exe.root_module.link_libc = true;
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello World!\n");
@@ -2364,7 +2364,7 @@ fn testLinkingObj(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
const run = addRunArtifact(exe);
run.expectStdErrEqual("84\n");
@@ -2389,7 +2389,7 @@ fn testLinkingStaticLib(b: *Build, opts: Options) *Step {
\\}
,
});
- lib.addObject(obj);
+ lib.root_module.addObject(obj);
const exe = addExecutable(b, opts, .{
.name = "testlib",
@@ -2402,7 +2402,7 @@ fn testLinkingStaticLib(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.linkLibrary(lib);
+ exe.root_module.linkLibrary(lib);
const run = addRunArtifact(exe);
run.expectStdErrEqual("0\n");
@@ -2452,7 +2452,7 @@ fn testMergeStrings(b: *Build, opts: Options) *Step {
\\char16_t *utf16_1 = u"foo";
\\char32_t *utf32_1 = U"foo";
, &.{"-O2"});
- obj1.linkLibC();
+ obj1.root_module.link_libc = true;
const obj2 = addObject(b, opts, .{ .name = "b.o" });
addCSourceBytes(obj2,
@@ -2481,12 +2481,12 @@ fn testMergeStrings(b: *Build, opts: Options) *Step {
\\ assert((void*)wide1 != (void*)utf16_1);
\\}
, &.{"-O2"});
- obj2.linkLibC();
+ obj2.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(obj1);
- exe.addObject(obj2);
- exe.linkLibC();
+ exe.root_module.addObject(obj1);
+ exe.root_module.addObject(obj2);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -2520,8 +2520,8 @@ fn testMergeStrings2(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(obj1);
- exe.addObject(obj2);
+ exe.root_module.addObject(obj1);
+ exe.root_module.addObject(obj2);
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -2537,11 +2537,11 @@ fn testMergeStrings2(b: *Build, opts: Options) *Step {
{
const obj3 = addObject(b, opts, .{ .name = "c" });
- obj3.addObject(obj1);
- obj3.addObject(obj2);
+ obj3.root_module.addObject(obj1);
+ obj3.root_module.addObject(obj2);
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(obj3);
+ exe.root_module.addObject(obj3);
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -2564,7 +2564,7 @@ fn testNoEhFrameHdr(b: *Build, opts: Options) *Step {
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe, "int main() { return 0; }", &.{});
exe.link_eh_frame_hdr = false;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInHeaders();
@@ -2586,7 +2586,7 @@ fn testPie(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
exe.root_module.pic = true;
exe.pie = true;
@@ -2617,7 +2617,7 @@ fn testPltGot(b: *Build, opts: Options) *Step {
\\ printf("Hello world\n");
\\}
, &.{});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe,
@@ -2626,9 +2626,9 @@ fn testPltGot(b: *Build, opts: Options) *Step {
\\void foo() { ignore(hello); }
\\int main() { hello(); }
, &.{});
- exe.linkLibrary(dso);
+ exe.root_module.linkLibrary(dso);
exe.root_module.pic = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello world\n");
@@ -2647,7 +2647,7 @@ fn testPreinitArray(b: *Build, opts: Options) *Step {
});
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
const check = exe.checkObject();
check.checkInDynamicSection();
@@ -2662,7 +2662,7 @@ fn testPreinitArray(b: *Build, opts: Options) *Step {
\\__attribute__((section(".preinit_array")))
\\void *preinit[] = { preinit_fn };
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInDynamicSection();
@@ -2710,15 +2710,15 @@ fn testRelocatableArchive(b: *Build, opts: Options) *Step {
});
const lib = addStaticLibrary(b, opts, .{ .name = "lib" });
- lib.addObject(obj1);
- lib.addObject(obj2);
- lib.addObject(obj3);
+ lib.root_module.addObject(obj1);
+ lib.root_module.addObject(obj2);
+ lib.root_module.addObject(obj3);
const obj5 = addObject(b, opts, .{
.name = "obj5",
});
- obj5.addObject(obj4);
- obj5.linkLibrary(lib);
+ obj5.root_module.addObject(obj4);
+ obj5.root_module.linkLibrary(lib);
const check = obj5.checkObject();
check.checkInSymtab();
@@ -2744,7 +2744,7 @@ fn testRelocatableEhFrame(b: *Build, opts: Options) *Step {
\\}
,
});
- obj1.linkLibCpp();
+ obj1.root_module.link_libcpp = true;
const obj2 = addObject(b, opts, .{
.name = "obj2",
.cpp_source_bytes =
@@ -2754,7 +2754,7 @@ fn testRelocatableEhFrame(b: *Build, opts: Options) *Step {
\\}
,
});
- obj2.linkLibCpp();
+ obj2.root_module.link_libcpp = true;
const obj3 = addObject(b, opts, .{ .name = "obj3", .cpp_source_bytes =
\\#include <iostream>
\\#include <stdexcept>
@@ -2768,18 +2768,18 @@ fn testRelocatableEhFrame(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
});
- obj3.linkLibCpp();
+ obj3.root_module.link_libcpp = true;
{
const obj = addObject(b, opts, .{ .name = "obj" });
- obj.addObject(obj1);
- obj.addObject(obj2);
- obj.linkLibCpp();
+ obj.root_module.addObject(obj1);
+ obj.root_module.addObject(obj2);
+ obj.root_module.link_libcpp = true;
const exe = addExecutable(b, opts, .{ .name = "test1" });
- exe.addObject(obj3);
- exe.addObject(obj);
- exe.linkLibCpp();
+ exe.root_module.addObject(obj3);
+ exe.root_module.addObject(obj);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("exception=Oh no!");
@@ -2788,14 +2788,14 @@ fn testRelocatableEhFrame(b: *Build, opts: Options) *Step {
{
// Flipping the order should not influence the end result.
const obj = addObject(b, opts, .{ .name = "obj" });
- obj.addObject(obj2);
- obj.addObject(obj1);
- obj.linkLibCpp();
+ obj.root_module.addObject(obj2);
+ obj.root_module.addObject(obj1);
+ obj.root_module.link_libcpp = true;
const exe = addExecutable(b, opts, .{ .name = "test2" });
- exe.addObject(obj3);
- exe.addObject(obj);
- exe.linkLibCpp();
+ exe.root_module.addObject(obj3);
+ exe.root_module.addObject(obj);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("exception=Oh no!");
@@ -2817,7 +2817,7 @@ fn testRelocatableEhFrameComdatHeavy(b: *Build, opts: Options) *Step {
\\}
,
});
- obj1.linkLibCpp();
+ obj1.root_module.link_libcpp = true;
const obj2 = addObject(b, opts, .{
.name = "obj2",
.cpp_source_bytes =
@@ -2827,7 +2827,7 @@ fn testRelocatableEhFrameComdatHeavy(b: *Build, opts: Options) *Step {
\\}
,
});
- obj2.linkLibCpp();
+ obj2.root_module.link_libcpp = true;
const obj3 = addObject(b, opts, .{
.name = "obj3",
.cpp_source_bytes =
@@ -2844,17 +2844,17 @@ fn testRelocatableEhFrameComdatHeavy(b: *Build, opts: Options) *Step {
\\}
,
});
- obj3.linkLibCpp();
+ obj3.root_module.link_libcpp = true;
const obj = addObject(b, opts, .{ .name = "obj" });
- obj.addObject(obj1);
- obj.addObject(obj2);
- obj.addObject(obj3);
- obj.linkLibCpp();
+ obj.root_module.addObject(obj1);
+ obj.root_module.addObject(obj2);
+ obj.root_module.addObject(obj3);
+ obj.root_module.link_libcpp = true;
const exe = addExecutable(b, opts, .{ .name = "test2" });
- exe.addObject(obj);
- exe.linkLibCpp();
+ exe.root_module.addObject(obj);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("exception=Oh no!");
@@ -2880,7 +2880,7 @@ fn testRelocatableMergeStrings(b: *Build, opts: Options) *Step {
});
const obj2 = addObject(b, opts, .{ .name = "b" });
- obj2.addObject(obj1);
+ obj2.root_module.addObject(obj1);
const check = obj2.checkObject();
check.dumpSection(".rodata.str1.1");
@@ -2905,7 +2905,7 @@ fn testRelocatableNoEhFrame(b: *Build, opts: Options) *Step {
const obj2 = addObject(b, opts, .{
.name = "obj2",
});
- obj2.addObject(obj1);
+ obj2.root_module.addObject(obj1);
const check1 = obj1.checkObject();
check1.checkInHeaders();
@@ -2940,12 +2940,12 @@ fn testSharedAbsSymbol(b: *Build, opts: Options) *Step {
,
.pic = true,
});
- obj.linkLibC();
+ obj.root_module.link_libc = true;
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(obj);
- exe.linkLibrary(dso);
+ exe.root_module.addObject(obj);
+ exe.root_module.linkLibrary(dso);
exe.pie = true;
const run = addRunArtifact(exe);
@@ -2965,8 +2965,8 @@ fn testSharedAbsSymbol(b: *Build, opts: Options) *Step {
// https://github.com/ziglang/zig/issues/17430
// {
// const exe = addExecutable(b, opts, .{ .name = "main2"});
- // exe.addObject(obj);
- // exe.linkLibrary(dso);
+ // exe.root_module.addObject(obj);
+ // exe.root_module.linkLibrary(dso);
// exe.pie = false;
// const run = addRunArtifact(exe);
@@ -2999,13 +2999,13 @@ fn testStrip(b: *Build, opts: Options) *Step {
\\}
,
});
- obj.linkLibC();
+ obj.root_module.link_libc = true;
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.root_module.strip = false;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInHeaders();
@@ -3016,9 +3016,9 @@ fn testStrip(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.root_module.strip = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInHeaders();
@@ -3074,7 +3074,7 @@ fn testTlsDfStaticTls(b: *Build, opts: Options) *Step {
{
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(obj);
+ dso.root_module.addObject(obj);
// dso.link_relax = true;
const check = dso.checkObject();
@@ -3086,7 +3086,7 @@ fn testTlsDfStaticTls(b: *Build, opts: Options) *Step {
// TODO add -Wl,--no-relax
// {
// const dso = addSharedLibrary(b, opts, .{ .name = "a"});
- // dso.addObject(obj);
+ // dso.root_module.addObject(obj);
// dso.link_relax = false;
// const check = dso.checkObject();
@@ -3128,8 +3128,8 @@ fn testTlsDso(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("5 3 5 3 5 3\n");
@@ -3159,7 +3159,7 @@ fn testTlsGd(b: *Build, opts: Options) *Step {
,
.pic = true,
});
- main_o.linkLibC();
+ main_o.root_module.link_libc = true;
const a_o = addObject(b, opts, .{
.name = "a",
@@ -3184,17 +3184,17 @@ fn testTlsGd(b: *Build, opts: Options) *Step {
const exp_stdout = "1 2 3 4 5 6\n";
const dso1 = addSharedLibrary(b, opts, .{ .name = "a" });
- dso1.addObject(a_o);
+ dso1.root_module.addObject(a_o);
const dso2 = addSharedLibrary(b, opts, .{ .name = "b" });
- dso2.addObject(b_o);
+ dso2.root_module.addObject(b_o);
// dso2.link_relax = false; // TODO
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
- exe.linkLibrary(dso1);
- exe.linkLibrary(dso2);
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkLibrary(dso1);
+ exe.root_module.linkLibrary(dso2);
const run = addRunArtifact(exe);
run.expectStdOutEqual(exp_stdout);
@@ -3203,10 +3203,10 @@ fn testTlsGd(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
+ exe.root_module.addObject(main_o);
// exe.link_relax = false; // TODO
- exe.linkLibrary(dso1);
- exe.linkLibrary(dso2);
+ exe.root_module.linkLibrary(dso1);
+ exe.root_module.linkLibrary(dso2);
const run = addRunArtifact(exe);
run.expectStdOutEqual(exp_stdout);
@@ -3216,9 +3216,9 @@ fn testTlsGd(b: *Build, opts: Options) *Step {
// https://github.com/ziglang/zig/issues/17430 ??
// {
// const exe = addExecutable(b, opts, .{ .name = "main3"});
- // exe.addObject(main_o);
- // exe.linkLibrary(dso1);
- // exe.linkLibrary(dso2);
+ // exe.root_module.addObject(main_o);
+ // exe.root_module.linkLibrary(dso1);
+ // exe.root_module.linkLibrary(dso2);
// exe.linkage = .static;
// const run = addRunArtifact(exe);
@@ -3228,10 +3228,10 @@ fn testTlsGd(b: *Build, opts: Options) *Step {
// {
// const exe = addExecutable(b, opts, .{ .name = "main4"});
- // exe.addObject(main_o);
+ // exe.root_module.addObject(main_o);
// // exe.link_relax = false; // TODO
- // exe.linkLibrary(dso1);
- // exe.linkLibrary(dso2);
+ // exe.root_module.linkLibrary(dso1);
+ // exe.root_module.linkLibrary(dso2);
// exe.linkage = .static;
// const run = addRunArtifact(exe);
@@ -3265,7 +3265,7 @@ fn testTlsGdNoPlt(b: *Build, opts: Options) *Step {
.c_source_flags = &.{"-fno-plt"},
.pic = true,
});
- obj.linkLibC();
+ obj.root_module.link_libc = true;
const a_so = addSharedLibrary(b, opts, .{ .name = "a" });
addCSourceBytes(a_so,
@@ -3284,10 +3284,10 @@ fn testTlsGdNoPlt(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(obj);
- exe.linkLibrary(a_so);
- exe.linkLibrary(b_so);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.linkLibrary(a_so);
+ exe.root_module.linkLibrary(b_so);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("1 2 3 4 5 6\n");
@@ -3296,10 +3296,10 @@ fn testTlsGdNoPlt(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(obj);
- exe.linkLibrary(a_so);
- exe.linkLibrary(b_so);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.linkLibrary(a_so);
+ exe.root_module.linkLibrary(b_so);
+ exe.root_module.link_libc = true;
// exe.link_relax = false; // TODO
const run = addRunArtifact(exe);
@@ -3329,7 +3329,7 @@ fn testTlsGdToIe(b: *Build, opts: Options) *Step {
,
.pic = true,
});
- a_o.linkLibC();
+ a_o.root_module.link_libc = true;
const b_o = addObject(b, opts, .{
.name = "b",
@@ -3342,12 +3342,12 @@ fn testTlsGdToIe(b: *Build, opts: Options) *Step {
{
const dso = addSharedLibrary(b, opts, .{ .name = "a1" });
- dso.addObject(a_o);
+ dso.root_module.addObject(a_o);
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(b_o);
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.addObject(b_o);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("1 2 3\n");
@@ -3356,13 +3356,13 @@ fn testTlsGdToIe(b: *Build, opts: Options) *Step {
{
const dso = addSharedLibrary(b, opts, .{ .name = "a2" });
- dso.addObject(a_o);
+ dso.root_module.addObject(a_o);
// dso.link_relax = false; // TODO
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(b_o);
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.addObject(b_o);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("1 2 3\n");
@@ -3371,12 +3371,12 @@ fn testTlsGdToIe(b: *Build, opts: Options) *Step {
// {
// const dso = addSharedLibrary(b, opts, .{ .name = "a"});
- // dso.addObject(a_o);
+ // dso.root_module.addObject(a_o);
// dso.link_z_nodlopen = true;
// const exe = addExecutable(b, opts, .{ .name = "main"});
- // exe.addObject(b_o);
- // exe.linkLibrary(dso);
+ // exe.root_module.addObject(b_o);
+ // exe.root_module.linkLibrary(dso);
// const run = addRunArtifact(exe);
// run.expectStdOutEqual("1 2 3\n");
@@ -3385,13 +3385,13 @@ fn testTlsGdToIe(b: *Build, opts: Options) *Step {
// {
// const dso = addSharedLibrary(b, opts, .{ .name = "a"});
- // dso.addObject(a_o);
+ // dso.root_module.addObject(a_o);
// dso.link_relax = false;
// dso.link_z_nodlopen = true;
// const exe = addExecutable(b, opts, .{ .name = "main"});
- // exe.addObject(b_o);
- // exe.linkLibrary(dso);
+ // exe.root_module.addObject(b_o);
+ // exe.root_module.linkLibrary(dso);
// const run = addRunArtifact(exe);
// run.expectStdOutEqual("1 2 3\n");
@@ -3417,7 +3417,7 @@ fn testTlsIe(b: *Build, opts: Options) *Step {
\\ printf("%d %d ", foo, bar);
\\}
, &.{});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const main_o = addObject(b, opts, .{
.name = "main",
@@ -3435,15 +3435,15 @@ fn testTlsIe(b: *Build, opts: Options) *Step {
\\}
,
});
- main_o.linkLibC();
+ main_o.root_module.link_libc = true;
const exp_stdout = "0 0 3 5 7\n";
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(exp_stdout);
@@ -3452,9 +3452,9 @@ fn testTlsIe(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
// exe.link_relax = false; // TODO
const run = addRunArtifact(exe);
@@ -3500,17 +3500,17 @@ fn testTlsLargeAlignment(b: *Build, opts: Options) *Step {
,
.pic = true,
});
- c_o.linkLibC();
+ c_o.root_module.link_libc = true;
{
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(a_o);
- dso.addObject(b_o);
+ dso.root_module.addObject(a_o);
+ dso.root_module.addObject(b_o);
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(c_o);
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.addObject(c_o);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("42 1 2 3\n");
@@ -3519,10 +3519,10 @@ fn testTlsLargeAlignment(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(c_o);
- exe.linkLibC();
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(c_o);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("42 1 2 3\n");
@@ -3555,7 +3555,7 @@ fn testTlsLargeTbss(b: *Build, opts: Options) *Step {
\\ printf("%d %d %d %d %d %d\n", x[0], x[1], x[1023], y[0], y[1], y[1023]);
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
// Disabled to work around the ELF linker crashing.
// Can be reproduced on a x86_64-linux host by commenting out the line below.
exe.root_module.sanitize_c = .off;
@@ -3580,7 +3580,7 @@ fn testTlsLargeStaticImage(b: *Build, opts: Options) *Step {
\\}
, &.{});
exe.root_module.pic = true;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("1 2 3 0 5\n");
@@ -3609,7 +3609,7 @@ fn testTlsLd(b: *Build, opts: Options) *Step {
.c_source_flags = &.{"-ftls-model=local-dynamic"},
.pic = true,
});
- main_o.linkLibC();
+ main_o.root_module.link_libc = true;
const a_o = addObject(b, opts, .{
.name = "a",
@@ -3622,9 +3622,9 @@ fn testTlsLd(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
- exe.addObject(a_o);
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(exp_stdout);
@@ -3633,9 +3633,9 @@ fn testTlsLd(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
- exe.addObject(a_o);
- exe.linkLibC();
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.link_libc = true;
// exe.link_relax = false; // TODO
const run = addRunArtifact(exe);
@@ -3668,8 +3668,8 @@ fn testTlsLdDso(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("1 2\n");
@@ -3699,7 +3699,7 @@ fn testTlsLdNoPlt(b: *Build, opts: Options) *Step {
.c_source_flags = &.{ "-ftls-model=local-dynamic", "-fno-plt" },
.pic = true,
});
- a_o.linkLibC();
+ a_o.root_module.link_libc = true;
const b_o = addObject(b, opts, .{
.name = "b",
@@ -3710,9 +3710,9 @@ fn testTlsLdNoPlt(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.linkLibC();
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("3 5 3 5\n");
@@ -3721,9 +3721,9 @@ fn testTlsLdNoPlt(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.linkLibC();
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.link_libc = true;
// exe.link_relax = false; // TODO
const run = addRunArtifact(exe);
@@ -3756,7 +3756,7 @@ fn testTlsNoPic(b: *Build, opts: Options) *Step {
\\__attribute__((tls_model("global-dynamic"))) _Thread_local int foo;
, &.{});
exe.root_module.pic = false;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("3 5 3 5\n");
@@ -3784,7 +3784,7 @@ fn testTlsOffsetAlignment(b: *Build, opts: Options) *Step {
\\ return NULL;
\\}
, &.{});
- dso.linkLibC();
+ dso.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe,
@@ -3811,8 +3811,8 @@ fn testTlsOffsetAlignment(b: *Build, opts: Options) *Step {
\\ pthread_join(thread, NULL);
\\}
, &.{});
- exe.addRPath(dso.getEmittedBinDirectory());
- exe.linkLibC();
+ exe.root_module.addRPath(dso.getEmittedBinDirectory());
+ exe.root_module.link_libc = true;
exe.root_module.pic = true;
const run = addRunArtifact(exe);
@@ -3842,14 +3842,14 @@ fn testTlsPic(b: *Build, opts: Options) *Step {
,
.pic = true,
});
- obj.linkLibC();
+ obj.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe,
\\__attribute__((tls_model("global-dynamic"))) _Thread_local int foo = 3;
, &.{});
- exe.addObject(obj);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("3 5 3 5\n");
@@ -3889,14 +3889,14 @@ fn testTlsSmallAlignment(b: *Build, opts: Options) *Step {
,
.pic = true,
});
- c_o.linkLibC();
+ c_o.root_module.link_libc = true;
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(c_o);
- exe.linkLibC();
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(c_o);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("42\n");
@@ -3905,13 +3905,13 @@ fn testTlsSmallAlignment(b: *Build, opts: Options) *Step {
{
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(a_o);
- dso.addObject(b_o);
+ dso.root_module.addObject(a_o);
+ dso.root_module.addObject(b_o);
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(c_o);
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.addObject(c_o);
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("42\n");
@@ -3939,7 +3939,7 @@ fn testTlsStatic(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(
@@ -3969,8 +3969,8 @@ fn testUnknownFileTypeError(b: *Build, opts: Options) *Step {
\\ return foo;
\\}
, &.{});
- exe.linkLibrary(dylib);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dylib);
+ exe.root_module.link_libc = true;
expectLinkErrors(exe, test_step, .{
.contains = "error: failed to parse shared library: BadMagic",
@@ -3993,7 +3993,7 @@ fn testUnresolvedError(b: *Build, opts: Options) *Step {
,
.c_source_flags = &.{"-ffunction-sections"},
});
- obj1.linkLibC();
+ obj1.root_module.link_libc = true;
const obj2 = addObject(b, opts, .{
.name = "b",
@@ -4007,12 +4007,12 @@ fn testUnresolvedError(b: *Build, opts: Options) *Step {
,
.c_source_flags = &.{"-ffunction-sections"},
});
- obj2.linkLibC();
+ obj2.root_module.link_libc = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(obj1);
- exe.addObject(obj2);
- exe.linkLibC();
+ exe.root_module.addObject(obj1);
+ exe.root_module.addObject(obj2);
+ exe.root_module.link_libc = true;
expectLinkErrors(exe, test_step, .{ .exact = &.{
"error: undefined symbol: foo",
@@ -4037,12 +4037,12 @@ fn testWeakExports(b: *Build, opts: Options) *Step {
,
.pic = true,
});
- obj.linkLibC();
+ obj.root_module.link_libc = true;
{
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(obj);
- dso.linkLibC();
+ dso.root_module.addObject(obj);
+ dso.root_module.link_libc = true;
const check = dso.checkObject();
check.checkInDynamicSymtab();
@@ -4052,8 +4052,8 @@ fn testWeakExports(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(obj);
- exe.linkLibC();
+ exe.root_module.addObject(obj);
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInDynamicSymtab();
@@ -4084,8 +4084,8 @@ fn testWeakUndefsDso(b: *Build, opts: Options) *Step {
\\int bar();
\\int main() { printf("bar=%d\n", bar()); }
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("bar=-1\n");
@@ -4100,8 +4100,8 @@ fn testWeakUndefsDso(b: *Build, opts: Options) *Step {
\\int bar();
\\int main() { printf("bar=%d\n", bar()); }
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("bar=5\n");
@@ -4122,7 +4122,7 @@ fn testZNow(b: *Build, opts: Options) *Step {
{
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(obj);
+ dso.root_module.addObject(obj);
const check = dso.checkObject();
check.checkInDynamicSection();
@@ -4132,7 +4132,7 @@ fn testZNow(b: *Build, opts: Options) *Step {
{
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(obj);
+ dso.root_module.addObject(obj);
dso.link_z_lazy = true;
const check = dso.checkObject();
@@ -4150,7 +4150,7 @@ fn testZStackSize(b: *Build, opts: Options) *Step {
const exe = addExecutable(b, opts, .{ .name = "main" });
addCSourceBytes(exe, "int main() { return 0; }", &.{});
exe.stack_size = 0x800000;
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const check = exe.checkObject();
check.checkInHeaders();
@@ -4202,8 +4202,8 @@ fn testZText(b: *Build, opts: Options) *Step {
});
const dso = addSharedLibrary(b, opts, .{ .name = "a" });
- dso.addObject(a_o);
- dso.addObject(b_o);
+ dso.root_module.addObject(a_o);
+ dso.root_module.addObject(b_o);
dso.link_z_notext = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
@@ -4214,8 +4214,8 @@ fn testZText(b: *Build, opts: Options) *Step {
\\ printf("%d\n", fnn());
\\}
, &.{});
- exe.linkLibrary(dso);
- exe.linkLibC();
+ exe.root_module.linkLibrary(dso);
+ exe.root_module.link_libc = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual("3\n");
diff --git a/test/link/link.zig b/test/link/link.zig
index 5d1a02f23e..601247e877 100644
--- a/test/link/link.zig
+++ b/test/link/link.zig
@@ -140,20 +140,20 @@ pub fn addRunArtifact(comp: *Compile) *Run {
pub fn addCSourceBytes(comp: *Compile, bytes: []const u8, flags: []const []const u8) void {
const b = comp.step.owner;
const file = WriteFile.create(b).add("a.c", bytes);
- comp.addCSourceFile(.{ .file = file, .flags = flags });
+ comp.root_module.addCSourceFile(.{ .file = file, .flags = flags });
}
pub fn addCppSourceBytes(comp: *Compile, bytes: []const u8, flags: []const []const u8) void {
const b = comp.step.owner;
const file = WriteFile.create(b).add("a.cpp", bytes);
- comp.addCSourceFile(.{ .file = file, .flags = flags });
+ comp.root_module.addCSourceFile(.{ .file = file, .flags = flags });
}
pub fn addAsmSourceBytes(comp: *Compile, bytes: []const u8) void {
const b = comp.step.owner;
const actual_bytes = std.fmt.allocPrint(b.allocator, "{s}\n", .{bytes}) catch @panic("OOM");
const file = WriteFile.create(b).add("a.s", actual_bytes);
- comp.addAssemblyFile(file);
+ comp.root_module.addAssemblyFile(file);
}
pub fn expectLinkErrors(comp: *Compile, test_step: *Step, expected_errors: Compile.ExpectedCompileErrors) void {
diff --git a/test/link/macho.zig b/test/link/macho.zig
index 80d861eea0..422fc89a56 100644
--- a/test/link/macho.zig
+++ b/test/link/macho.zig
@@ -127,7 +127,7 @@ fn testDeadStrip(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "no_dead_strip" });
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.link_gc_sections = false;
const check = exe.checkObject();
@@ -156,7 +156,7 @@ fn testDeadStrip(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "yes_dead_strip" });
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
exe.link_gc_sections = true;
const check = exe.checkObject();
@@ -206,7 +206,7 @@ fn testDuplicateDefinitions(b: *Build, opts: Options) *Step {
\\ strong();
\\}
});
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
expectLinkErrors(exe, test_step, .{ .exact = &.{
"error: duplicate symbol definition: _strong",
@@ -235,7 +235,7 @@ fn testDeadStripDylibs(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
+ exe.root_module.addObject(main_o);
exe.root_module.linkFramework("Cocoa", .{});
const check = exe.checkObject();
@@ -254,7 +254,7 @@ fn testDeadStripDylibs(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
+ exe.root_module.addObject(main_o);
exe.root_module.linkFramework("Cocoa", .{});
exe.dead_strip_dylibs = true;
@@ -350,7 +350,7 @@ fn testEmptyObject(b: *Build, opts: Options) *Step {
\\ printf("Hello world!");
\\}
});
- exe.addObject(empty);
+ exe.root_module.addObject(empty);
const run = addRunArtifact(exe);
run.expectStdOutEqual("Hello world!");
@@ -451,7 +451,7 @@ fn testEntryPointDylib(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
, &.{});
- exe.linkLibrary(dylib);
+ exe.root_module.linkLibrary(dylib);
exe.entry = .{ .symbol_name = "_bootstrap" };
exe.forceUndefinedSymbol("_my_main");
@@ -604,11 +604,11 @@ fn testHeaderWeakFlags(b: *Build, opts: Options) *Step {
});
const lib = addSharedLibrary(b, opts, .{ .name = "a" });
- lib.addObject(obj1);
+ lib.root_module.addObject(obj1);
{
const exe = addExecutable(b, opts, .{ .name = "main1", .c_source_bytes = "int main() { return 0; }" });
- exe.addObject(obj1);
+ exe.root_module.addObject(obj1);
const check = exe.checkObject();
check.checkInHeaders();
@@ -642,8 +642,8 @@ fn testHeaderWeakFlags(b: *Build, opts: Options) *Step {
}
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.linkLibrary(lib);
- exe.addObject(obj);
+ exe.root_module.linkLibrary(lib);
+ exe.root_module.addObject(obj);
const check = exe.checkObject();
check.checkInHeaders();
@@ -665,7 +665,7 @@ fn testHeaderWeakFlags(b: *Build, opts: Options) *Step {
\\_main:
\\ ret
});
- exe.linkLibrary(lib);
+ exe.root_module.linkLibrary(lib);
const check = exe.checkObject();
check.checkInHeaders();
@@ -910,7 +910,7 @@ fn testLinkingStaticLib(b: *Build, opts: Options) *Step {
\\}
,
});
- lib.addObject(obj);
+ lib.root_module.addObject(obj);
const exe = addExecutable(b, opts, .{
.name = "testlib",
@@ -923,7 +923,7 @@ fn testLinkingStaticLib(b: *Build, opts: Options) *Step {
\\}
,
});
- exe.linkLibrary(lib);
+ exe.root_module.linkLibrary(lib);
const run = addRunArtifact(exe);
run.expectStdErrEqual("0\n");
@@ -1051,28 +1051,28 @@ fn testMergeLiteralsX64(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(main_o);
runWithChecks(test_step, exe);
}
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(b_o);
- exe.addObject(a_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(main_o);
runWithChecks(test_step, exe);
}
{
const c_o = addObject(b, opts, .{ .name = "c" });
- c_o.addObject(a_o);
- c_o.addObject(b_o);
- c_o.addObject(main_o);
+ c_o.root_module.addObject(a_o);
+ c_o.root_module.addObject(b_o);
+ c_o.root_module.addObject(main_o);
const exe = addExecutable(b, opts, .{ .name = "main3" });
- exe.addObject(c_o);
+ exe.root_module.addObject(c_o);
runWithChecks(test_step, exe);
}
@@ -1167,28 +1167,28 @@ fn testMergeLiteralsArm64(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(main_o);
runWithChecks(test_step, exe);
}
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(b_o);
- exe.addObject(a_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(main_o);
runWithChecks(test_step, exe);
}
{
const c_o = addObject(b, opts, .{ .name = "c" });
- c_o.addObject(a_o);
- c_o.addObject(b_o);
- c_o.addObject(main_o);
+ c_o.root_module.addObject(a_o);
+ c_o.root_module.addObject(b_o);
+ c_o.root_module.addObject(main_o);
const exe = addExecutable(b, opts, .{ .name = "main3" });
- exe.addObject(c_o);
+ exe.root_module.addObject(c_o);
runWithChecks(test_step, exe);
}
@@ -1259,9 +1259,9 @@ fn testMergeLiteralsArm642(b: *Build, opts: Options) *Step {
});
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(main_o);
const check = exe.checkObject();
check.dumpSection("__TEXT,__const");
@@ -1335,17 +1335,17 @@ fn testMergeLiteralsAlignment(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(main_o);
runWithChecks(test_step, exe);
}
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(b_o);
- exe.addObject(a_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(main_o);
runWithChecks(test_step, exe);
}
@@ -1414,27 +1414,27 @@ fn testMergeLiteralsObjc(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
- exe.addObject(a_o);
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(a_o);
exe.root_module.linkFramework("Foundation", .{});
runWithChecks(test_step, exe);
}
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(a_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(main_o);
exe.root_module.linkFramework("Foundation", .{});
runWithChecks(test_step, exe);
}
{
const b_o = addObject(b, opts, .{ .name = "b" });
- b_o.addObject(a_o);
- b_o.addObject(main_o);
+ b_o.root_module.addObject(a_o);
+ b_o.root_module.addObject(main_o);
const exe = addExecutable(b, opts, .{ .name = "main3" });
- exe.addObject(b_o);
+ exe.root_module.addObject(b_o);
exe.root_module.linkFramework("Foundation", .{});
runWithChecks(test_step, exe);
}
@@ -1610,7 +1610,7 @@ fn testObjcpp(b: *Build, opts: Options) *Step {
\\@end
});
foo_o.root_module.addIncludePath(foo_h.dirname());
- foo_o.linkLibCpp();
+ foo_o.root_module.link_libcpp = true;
const exe = addExecutable(b, opts, .{ .name = "main", .objcpp_source_bytes =
\\#import "Foo.h"
@@ -1628,8 +1628,8 @@ fn testObjcpp(b: *Build, opts: Options) *Step {
\\}
});
exe.root_module.addIncludePath(foo_h.dirname());
- exe.addObject(foo_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(foo_o);
+ exe.root_module.link_libcpp = true;
exe.root_module.linkFramework("Foundation", .{});
const run = addRunArtifact(exe);
@@ -1693,7 +1693,7 @@ fn testReexportsZig(b: *Build, opts: Options) *Step {
\\ return bar() - foo();
\\}
});
- exe.linkLibrary(lib);
+ exe.root_module.linkLibrary(lib);
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -1711,7 +1711,7 @@ fn testRelocatable(b: *Build, opts: Options) *Step {
\\ throw std::runtime_error("Oh no!");
\\}
});
- a_o.linkLibCpp();
+ a_o.root_module.link_libcpp = true;
const b_o = addObject(b, opts, .{ .name = "b", .cpp_source_bytes =
\\extern int try_me();
@@ -1733,19 +1733,19 @@ fn testRelocatable(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
});
- main_o.linkLibCpp();
+ main_o.root_module.link_libcpp = true;
const exp_stdout = "exception=Oh no!";
{
const c_o = addObject(b, opts, .{ .name = "c" });
- c_o.addObject(a_o);
- c_o.addObject(b_o);
+ c_o.root_module.addObject(a_o);
+ c_o.root_module.addObject(b_o);
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
- exe.addObject(c_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(c_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(exp_stdout);
@@ -1754,13 +1754,13 @@ fn testRelocatable(b: *Build, opts: Options) *Step {
{
const d_o = addObject(b, opts, .{ .name = "d" });
- d_o.addObject(a_o);
- d_o.addObject(b_o);
- d_o.addObject(main_o);
+ d_o.root_module.addObject(a_o);
+ d_o.root_module.addObject(b_o);
+ d_o.root_module.addObject(main_o);
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(d_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(d_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(exp_stdout);
@@ -1805,12 +1805,12 @@ fn testRelocatableZig(b: *Build, opts: Options) *Step {
});
const c_o = addObject(b, opts, .{ .name = "c" });
- c_o.addObject(a_o);
- c_o.addObject(b_o);
- c_o.addObject(main_o);
+ c_o.root_module.addObject(a_o);
+ c_o.root_module.addObject(b_o);
+ c_o.root_module.addObject(main_o);
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(c_o);
+ exe.root_module.addObject(c_o);
const run = addRunArtifact(exe);
run.addCheck(.{ .expect_stderr_match = b.dupe("incrFoo=1") });
@@ -1833,10 +1833,10 @@ fn testSearchStrategy(b: *Build, opts: Options) *Step {
});
const liba = addStaticLibrary(b, opts, .{ .name = "a" });
- liba.addObject(obj);
+ liba.root_module.addObject(obj);
const dylib = addSharedLibrary(b, opts, .{ .name = "a" });
- dylib.addObject(obj);
+ dylib.root_module.addObject(obj);
const main_o = addObject(b, opts, .{ .name = "main", .c_source_bytes =
\\#include<stdio.h>
@@ -1850,7 +1850,7 @@ fn testSearchStrategy(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(main_o);
+ exe.root_module.addObject(main_o);
exe.root_module.linkSystemLibrary("a", .{ .use_pkg_config = .no, .search_strategy = .mode_first });
exe.root_module.addLibraryPath(liba.getEmittedBinDirectory());
exe.root_module.addLibraryPath(dylib.getEmittedBinDirectory());
@@ -1869,7 +1869,7 @@ fn testSearchStrategy(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(main_o);
+ exe.root_module.addObject(main_o);
exe.root_module.linkSystemLibrary("a", .{ .use_pkg_config = .no, .search_strategy = .paths_first });
exe.root_module.addLibraryPath(liba.getEmittedBinDirectory());
exe.root_module.addLibraryPath(dylib.getEmittedBinDirectory());
@@ -1924,9 +1924,9 @@ fn testSectionBoundarySymbols(b: *Build, opts: Options) *Step {
});
const exe = addExecutable(b, opts, .{ .name = "test" });
- exe.addObject(obj1);
- exe.addObject(obj2);
- exe.addObject(main_o);
+ exe.root_module.addObject(obj1);
+ exe.root_module.addObject(obj2);
+ exe.root_module.addObject(main_o);
const run = b.addRunArtifact(exe);
run.skip_foreign_checks = true;
@@ -1951,9 +1951,9 @@ fn testSectionBoundarySymbols(b: *Build, opts: Options) *Step {
});
const exe = addExecutable(b, opts, .{ .name = "test" });
- exe.addObject(obj1);
- exe.addObject(obj3);
- exe.addObject(main_o);
+ exe.root_module.addObject(obj1);
+ exe.root_module.addObject(obj3);
+ exe.root_module.addObject(main_o);
const run = b.addRunArtifact(exe);
run.skip_foreign_checks = true;
@@ -2031,9 +2031,9 @@ fn testSegmentBoundarySymbols(b: *Build, opts: Options) *Step {
});
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(obj1);
- exe.addObject(obj2);
- exe.addObject(main_o);
+ exe.root_module.addObject(obj1);
+ exe.root_module.addObject(obj2);
+ exe.root_module.addObject(main_o);
const run = addRunArtifact(exe);
run.expectStdOutEqual("All your codebase are belong to us.\n");
@@ -2054,9 +2054,9 @@ fn testSegmentBoundarySymbols(b: *Build, opts: Options) *Step {
});
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(obj1);
- exe.addObject(obj2);
- exe.addObject(main_o);
+ exe.root_module.addObject(obj1);
+ exe.root_module.addObject(obj2);
+ exe.root_module.addObject(main_o);
const check = exe.checkObject();
check.checkInHeaders();
@@ -2102,9 +2102,9 @@ fn testSymbolStabs(b: *Build, opts: Options) *Step {
});
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(a_o);
- exe.addObject(b_o);
- exe.addObject(main_o);
+ exe.root_module.addObject(a_o);
+ exe.root_module.addObject(b_o);
+ exe.root_module.addObject(main_o);
const run = addRunArtifact(exe);
run.expectStdOutEqual("foo=42,bar=24");
@@ -2299,7 +2299,7 @@ fn testTlsPointers(b: *Build, opts: Options) *Step {
\\}
});
bar_o.root_module.addIncludePath(foo_h.dirname());
- bar_o.linkLibCpp();
+ bar_o.root_module.link_libcpp = true;
const baz_o = addObject(b, opts, .{ .name = "baz", .cpp_source_bytes =
\\#include "foo.h"
@@ -2309,7 +2309,7 @@ fn testTlsPointers(b: *Build, opts: Options) *Step {
\\}
});
baz_o.root_module.addIncludePath(foo_h.dirname());
- baz_o.linkLibCpp();
+ baz_o.root_module.link_libcpp = true;
const main_o = addObject(b, opts, .{ .name = "main", .cpp_source_bytes =
\\extern int bar();
@@ -2321,13 +2321,13 @@ fn testTlsPointers(b: *Build, opts: Options) *Step {
\\}
});
main_o.root_module.addIncludePath(foo_h.dirname());
- main_o.linkLibCpp();
+ main_o.root_module.link_libcpp = true;
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(bar_o);
- exe.addObject(baz_o);
- exe.addObject(main_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(bar_o);
+ exe.root_module.addObject(baz_o);
+ exe.root_module.addObject(main_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -2445,7 +2445,7 @@ fn testTwoLevelNamespace(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
+ exe.root_module.addObject(main_o);
exe.root_module.linkSystemLibrary("a", .{});
exe.root_module.linkSystemLibrary("b", .{});
exe.root_module.addLibraryPath(liba.getEmittedBinDirectory());
@@ -2474,7 +2474,7 @@ fn testTwoLevelNamespace(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
+ exe.root_module.addObject(main_o);
exe.root_module.linkSystemLibrary("b", .{});
exe.root_module.linkSystemLibrary("a", .{});
exe.root_module.addLibraryPath(liba.getEmittedBinDirectory());
@@ -2510,14 +2510,14 @@ fn testDiscardLocalSymbols(b: *Build, opts: Options) *Step {
const obj = addObject(b, opts, .{ .name = "a", .c_source_bytes = "static int foo = 42;" });
const lib = addStaticLibrary(b, opts, .{ .name = "a" });
- lib.addObject(obj);
+ lib.root_module.addObject(obj);
const main_o = addObject(b, opts, .{ .name = "main", .c_source_bytes = "int main() { return 0; }" });
{
const exe = addExecutable(b, opts, .{ .name = "main3" });
- exe.addObject(main_o);
- exe.addObject(obj);
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(obj);
exe.discard_local_symbols = true;
const run = addRunArtifact(exe);
@@ -2532,8 +2532,8 @@ fn testDiscardLocalSymbols(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main4" });
- exe.addObject(main_o);
- exe.linkLibrary(lib);
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkLibrary(lib);
exe.discard_local_symbols = true;
const run = addRunArtifact(exe);
@@ -2555,14 +2555,14 @@ fn testUndefinedFlag(b: *Build, opts: Options) *Step {
const obj = addObject(b, opts, .{ .name = "a", .c_source_bytes = "int foo = 42;" });
const lib = addStaticLibrary(b, opts, .{ .name = "a" });
- lib.addObject(obj);
+ lib.root_module.addObject(obj);
const main_o = addObject(b, opts, .{ .name = "main", .c_source_bytes = "int main() { return 0; }" });
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
- exe.addObject(main_o);
- exe.linkLibrary(lib);
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkLibrary(lib);
exe.forceUndefinedSymbol("_foo");
const run = addRunArtifact(exe);
@@ -2577,8 +2577,8 @@ fn testUndefinedFlag(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main2" });
- exe.addObject(main_o);
- exe.linkLibrary(lib);
+ exe.root_module.addObject(main_o);
+ exe.root_module.linkLibrary(lib);
exe.forceUndefinedSymbol("_foo");
exe.link_gc_sections = true;
@@ -2594,8 +2594,8 @@ fn testUndefinedFlag(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main3" });
- exe.addObject(main_o);
- exe.addObject(obj);
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(obj);
const run = addRunArtifact(exe);
run.expectExitCode(0);
@@ -2609,8 +2609,8 @@ fn testUndefinedFlag(b: *Build, opts: Options) *Step {
{
const exe = addExecutable(b, opts, .{ .name = "main4" });
- exe.addObject(main_o);
- exe.addObject(obj);
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(obj);
exe.link_gc_sections = true;
const run = addRunArtifact(exe);
@@ -2642,7 +2642,7 @@ fn testUnresolvedError(b: *Build, opts: Options) *Step {
\\ std.debug.print("foo() + bar() = {d}", .{foo() + bar()});
\\}
});
- exe.addObject(obj);
+ exe.root_module.addObject(obj);
// TODO order should match across backends if possible
if (opts.use_llvm) {
@@ -2764,7 +2764,7 @@ fn testUnwindInfo(b: *Build, opts: Options) *Step {
\\}
});
main_o.root_module.addIncludePath(all_h.dirname());
- main_o.linkLibCpp();
+ main_o.root_module.link_libcpp = true;
const simple_string_o = addObject(b, opts, .{ .name = "simple_string", .cpp_source_bytes =
\\#include "all.h"
@@ -2799,7 +2799,7 @@ fn testUnwindInfo(b: *Build, opts: Options) *Step {
\\}
});
simple_string_o.root_module.addIncludePath(all_h.dirname());
- simple_string_o.linkLibCpp();
+ simple_string_o.root_module.link_libcpp = true;
const simple_string_owner_o = addObject(b, opts, .{ .name = "simple_string_owner", .cpp_source_bytes =
\\#include "all.h"
@@ -2816,7 +2816,7 @@ fn testUnwindInfo(b: *Build, opts: Options) *Step {
\\}
});
simple_string_owner_o.root_module.addIncludePath(all_h.dirname());
- simple_string_owner_o.linkLibCpp();
+ simple_string_owner_o.root_module.link_libcpp = true;
const exp_stdout =
\\Constructed: a
@@ -2828,10 +2828,10 @@ fn testUnwindInfo(b: *Build, opts: Options) *Step {
;
const exe = addExecutable(b, opts, .{ .name = "main" });
- exe.addObject(main_o);
- exe.addObject(simple_string_o);
- exe.addObject(simple_string_owner_o);
- exe.linkLibCpp();
+ exe.root_module.addObject(main_o);
+ exe.root_module.addObject(simple_string_o);
+ exe.root_module.addObject(simple_string_owner_o);
+ exe.root_module.link_libcpp = true;
const run = addRunArtifact(exe);
run.expectStdOutEqual(exp_stdout);
@@ -2896,7 +2896,7 @@ fn testUnwindInfoNoSubsectionsArm64(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
});
- exe.addObject(a_o);
+ exe.root_module.addObject(a_o);
const run = addRunArtifact(exe);
run.expectStdOutEqual("4\n");
@@ -2948,7 +2948,7 @@ fn testUnwindInfoNoSubsectionsX64(b: *Build, opts: Options) *Step {
\\ return 0;
\\}
});
- exe.addObject(a_o);
+ exe.root_module.addObject(a_o);
const run = addRunArtifact(exe);
run.expectStdOutEqual("4\n");
@@ -3052,7 +3052,7 @@ fn testWeakBind(b: *Build, opts: Options) *Step {
\\ .quad 0
\\ .quad _weak_internal_tlv$tlv$init
});
- exe.linkLibrary(lib);
+ exe.root_module.linkLibrary(lib);
{
const check = exe.checkObject();
diff --git a/test/link/wasm/extern/build.zig b/test/link/wasm/extern/build.zig
index 4976c97b31..74036d486d 100644
--- a/test/link/wasm/extern/build.zig
+++ b/test/link/wasm/extern/build.zig
@@ -16,7 +16,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .wasi }),
}),
});
- exe.addCSourceFile(.{ .file = b.path("foo.c"), .flags = &.{} });
+ exe.root_module.addCSourceFile(.{ .file = b.path("foo.c"), .flags = &.{} });
exe.use_llvm = false;
exe.use_lld = false;
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index a92f9fd158..60a564bc16 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -436,7 +436,7 @@ fn addFromDirInner(
const target = &resolved_target.result;
for (backends) |backend| {
if (backend == .stage2 and
- target.cpu.arch != .wasm32 and target.cpu.arch != .x86_64 and target.cpu.arch != .spirv64)
+ target.cpu.arch != .aarch64 and target.cpu.arch != .wasm32 and target.cpu.arch != .x86_64 and target.cpu.arch != .spirv64)
{
// Other backends don't support new liveness format
continue;
@@ -447,10 +447,6 @@ fn addFromDirInner(
// Rosetta has issues with ZLD
continue;
}
- if (backend == .stage2 and target.ofmt == .coff) {
- // COFF linker has bitrotted
- continue;
- }
const next = ctx.cases.items.len;
try ctx.cases.append(.{
@@ -560,7 +556,7 @@ pub fn lowerToTranslateCSteps(
.root_module = translate_c.createModule(),
});
run_exe.step.name = b.fmt("{s} build-exe", .{annotated_case_name});
- run_exe.linkLibC();
+ run_exe.root_module.link_libc = true;
const run = b.addRunArtifact(run_exe);
run.step.name = b.fmt("{s} run", .{annotated_case_name});
run.expectStdOutEqual(output);
@@ -800,6 +796,8 @@ const TestManifestConfigDefaults = struct {
}
// Windows
defaults = defaults ++ "x86_64-windows" ++ ",";
+ // Wasm
+ defaults = defaults ++ "wasm32-wasi";
break :blk defaults;
};
} else if (std.mem.eql(u8, key, "output_mode")) {
diff --git a/test/src/RunTranslatedC.zig b/test/src/RunTranslatedC.zig
index 528df69c4b..537c49dcd5 100644
--- a/test/src/RunTranslatedC.zig
+++ b/test/src/RunTranslatedC.zig
@@ -89,7 +89,7 @@ pub fn addCase(self: *RunTranslatedCContext, case: *const TestCase) void {
.root_module = translate_c.createModule(),
});
exe.step.name = b.fmt("{s} build-exe", .{annotated_case_name});
- exe.linkLibC();
+ exe.root_module.link_libc = true;
const run = b.addRunArtifact(exe);
run.step.name = b.fmt("{s} run", .{annotated_case_name});
if (!case.allow_warnings) {
diff --git a/test/standalone/build.zig.zon b/test/standalone/build.zig.zon
index 8cf899477f..bdd059ab37 100644
--- a/test/standalone/build.zig.zon
+++ b/test/standalone/build.zig.zon
@@ -1,6 +1,6 @@
.{
.name = .standalone_test_cases,
- .fingerprint = 0xc0dbdf9c818957be,
+ .fingerprint = 0xc0dbdf9c3b92810b,
.version = "0.0.0",
.dependencies = .{
.simple = .{
@@ -181,6 +181,9 @@
.install_headers = .{
.path = "install_headers",
},
+ .dependency_options = .{
+ .path = "dependency_options",
+ },
.dependencyFromBuildZig = .{
.path = "dependencyFromBuildZig",
},
diff --git a/test/standalone/c_embed_path/build.zig b/test/standalone/c_embed_path/build.zig
index a314847ba6..246e18b3f0 100644
--- a/test/standalone/c_embed_path/build.zig
+++ b/test/standalone/c_embed_path/build.zig
@@ -13,12 +13,12 @@ pub fn build(b: *std.Build) void {
.optimize = optimize,
}),
});
- exe.addCSourceFile(.{
+ exe.root_module.addCSourceFile(.{
.file = b.path("test.c"),
.flags = &.{"-std=c23"},
});
- exe.linkLibC();
- exe.addEmbedPath(b.path("data"));
+ exe.root_module.link_libc = true;
+ exe.root_module.addEmbedPath(b.path("data"));
const run_c_cmd = b.addRunArtifact(exe);
run_c_cmd.expectExitCode(0);
diff --git a/test/standalone/dependencyFromBuildZig/build.zig.zon b/test/standalone/dependencyFromBuildZig/build.zig.zon
index 085ae2c80b..fda6a098d8 100644
--- a/test/standalone/dependencyFromBuildZig/build.zig.zon
+++ b/test/standalone/dependencyFromBuildZig/build.zig.zon
@@ -1,5 +1,6 @@
.{
- .name = "dependencyFromBuildZig",
+ .name = .dependencyFromBuildZig,
+ .fingerprint = 0xfd939a1eb8169080,
.version = "0.0.0",
.dependencies = .{
.other = .{
diff --git a/test/standalone/dependencyFromBuildZig/other/build.zig.zon b/test/standalone/dependencyFromBuildZig/other/build.zig.zon
index 204abdbbba..bb8fcb6fb4 100644
--- a/test/standalone/dependencyFromBuildZig/other/build.zig.zon
+++ b/test/standalone/dependencyFromBuildZig/other/build.zig.zon
@@ -1,5 +1,6 @@
.{
- .name = "other",
+ .name = .other,
+ .fingerprint = 0xd9583520a2405f6c,
.version = "0.0.0",
.dependencies = .{},
.paths = .{""},
diff --git a/test/standalone/dependency_options/build.zig b/test/standalone/dependency_options/build.zig
new file mode 100644
index 0000000000..95cde3c891
--- /dev/null
+++ b/test/standalone/dependency_options/build.zig
@@ -0,0 +1,148 @@
+const std = @import("std");
+
+pub const Enum = enum { alfa, bravo, charlie };
+
+pub fn build(b: *std.Build) !void {
+ const test_step = b.step("test", "Test passing options to a dependency");
+ b.default_step = test_step;
+
+ const none_specified = b.dependency("other", .{});
+
+ const none_specified_mod = none_specified.module("dummy");
+ if (!none_specified_mod.resolved_target.?.query.eql(b.graph.host.query)) return error.TestFailed;
+ const expected_optimize: std.builtin.OptimizeMode = switch (b.release_mode) {
+ .off => .Debug,
+ .any => unreachable,
+ .fast => .ReleaseFast,
+ .safe => .ReleaseSafe,
+ .small => .ReleaseSmall,
+ };
+ if (none_specified_mod.optimize.? != expected_optimize) return error.TestFailed;
+
+ // Passing null is the same as not specifying the option,
+ // so this should resolve to the same cached dependency instance.
+ const null_specified = b.dependency("other", .{
+ // Null literals
+ .target = null,
+ .optimize = null,
+ .bool = null,
+
+ // Optionals
+ .int = @as(?i64, null),
+ .float = @as(?f64, null),
+
+ // Optionals of the wrong type
+ .string = @as(?usize, null),
+ .@"enum" = @as(?bool, null),
+
+ // Non-defined option names
+ .this_option_does_not_exist = null,
+ .neither_does_this_one = @as(?[]const u8, null),
+ });
+
+ if (null_specified != none_specified) return error.TestFailed;
+
+ const all_specified = b.dependency("other", .{
+ .target = b.resolveTargetQuery(.{ .cpu_arch = .x86_64, .os_tag = .windows, .abi = .gnu }),
+ .optimize = @as(std.builtin.OptimizeMode, .ReleaseSafe),
+ .bool = @as(bool, true),
+ .int = @as(i64, 123),
+ .float = @as(f64, 0.5),
+ .string = @as([]const u8, "abc"),
+ .string_list = @as([]const []const u8, &.{ "a", "b", "c" }),
+ .lazy_path = @as(std.Build.LazyPath, .{ .cwd_relative = "abc.txt" }),
+ .lazy_path_list = @as([]const std.Build.LazyPath, &.{
+ .{ .cwd_relative = "a.txt" },
+ .{ .cwd_relative = "b.txt" },
+ .{ .cwd_relative = "c.txt" },
+ }),
+ .@"enum" = @as(Enum, .alfa),
+ .enum_list = @as([]const Enum, &.{ .alfa, .bravo, .charlie }),
+ .build_id = @as(std.zig.BuildId, .uuid),
+ .hex_build_id = std.zig.BuildId.initHexString("\x12\x34\xcd\xef"),
+ });
+
+ const all_specified_mod = all_specified.module("dummy");
+ if (all_specified_mod.resolved_target.?.result.cpu.arch != .x86_64) return error.TestFailed;
+ if (all_specified_mod.resolved_target.?.result.os.tag != .windows) return error.TestFailed;
+ if (all_specified_mod.resolved_target.?.result.abi != .gnu) return error.TestFailed;
+ if (all_specified_mod.optimize.? != .ReleaseSafe) return error.TestFailed;
+
+ const all_specified_optional = b.dependency("other", .{
+ .target = @as(?std.Build.ResolvedTarget, b.resolveTargetQuery(.{ .cpu_arch = .x86_64, .os_tag = .windows, .abi = .gnu })),
+ .optimize = @as(?std.builtin.OptimizeMode, .ReleaseSafe),
+ .bool = @as(?bool, true),
+ .int = @as(?i64, 123),
+ .float = @as(?f64, 0.5),
+ .string = @as(?[]const u8, "abc"),
+ .string_list = @as(?[]const []const u8, &.{ "a", "b", "c" }),
+ .lazy_path = @as(?std.Build.LazyPath, .{ .cwd_relative = "abc.txt" }),
+ .lazy_path_list = @as(?[]const std.Build.LazyPath, &.{
+ .{ .cwd_relative = "a.txt" },
+ .{ .cwd_relative = "b.txt" },
+ .{ .cwd_relative = "c.txt" },
+ }),
+ .@"enum" = @as(?Enum, .alfa),
+ .enum_list = @as(?[]const Enum, &.{ .alfa, .bravo, .charlie }),
+ .build_id = @as(?std.zig.BuildId, .uuid),
+ .hex_build_id = @as(?std.zig.BuildId, .initHexString("\x12\x34\xcd\xef")),
+ });
+
+ if (all_specified_optional != all_specified) return error.TestFailed;
+
+ const all_specified_literal = b.dependency("other", .{
+ .target = b.resolveTargetQuery(.{ .cpu_arch = .x86_64, .os_tag = .windows, .abi = .gnu }),
+ .optimize = .ReleaseSafe,
+ .bool = true,
+ .int = 123,
+ .float = 0.5,
+ .string = "abc",
+ .string_list = &[_][]const u8{ "a", "b", "c" },
+ .lazy_path = @as(std.Build.LazyPath, .{ .cwd_relative = "abc.txt" }),
+ .lazy_path_list = &[_]std.Build.LazyPath{
+ .{ .cwd_relative = "a.txt" },
+ .{ .cwd_relative = "b.txt" },
+ .{ .cwd_relative = "c.txt" },
+ },
+ .@"enum" = .alfa,
+ .enum_list = &[_]Enum{ .alfa, .bravo, .charlie },
+ .build_id = .uuid,
+ .hex_build_id = std.zig.BuildId.initHexString("\x12\x34\xcd\xef"),
+ });
+
+ if (all_specified_literal != all_specified) return error.TestFailed;
+
+ var mut_string_buf = "abc".*;
+ const mut_string: []u8 = &mut_string_buf;
+ var mut_string_list_buf = [_][]const u8{ "a", "b", "c" };
+ const mut_string_list: [][]const u8 = &mut_string_list_buf;
+ var mut_lazy_path_list_buf = [_]std.Build.LazyPath{
+ .{ .cwd_relative = "a.txt" },
+ .{ .cwd_relative = "b.txt" },
+ .{ .cwd_relative = "c.txt" },
+ };
+ const mut_lazy_path_list: []std.Build.LazyPath = &mut_lazy_path_list_buf;
+ var mut_enum_list_buf = [_]Enum{ .alfa, .bravo, .charlie };
+ const mut_enum_list: []Enum = &mut_enum_list_buf;
+
+ // Most supported option types are serialized to a string representation,
+ // so alternative representations of the same option value should resolve
+ // to the same cached dependency instance.
+ const all_specified_alt = b.dependency("other", .{
+ .target = @as(std.Target.Query, .{ .cpu_arch = .x86_64, .os_tag = .windows, .abi = .gnu }),
+ .optimize = "ReleaseSafe",
+ .bool = .true,
+ .int = "123",
+ .float = @as(f16, 0.5),
+ .string = mut_string,
+ .string_list = mut_string_list,
+ .lazy_path = @as(std.Build.LazyPath, .{ .cwd_relative = "abc.txt" }),
+ .lazy_path_list = mut_lazy_path_list,
+ .@"enum" = "alfa",
+ .enum_list = mut_enum_list,
+ .build_id = "uuid",
+ .hex_build_id = "0x1234cdef",
+ });
+
+ if (all_specified_alt != all_specified) return error.TestFailed;
+}
diff --git a/test/standalone/dependency_options/build.zig.zon b/test/standalone/dependency_options/build.zig.zon
new file mode 100644
index 0000000000..6788640a80
--- /dev/null
+++ b/test/standalone/dependency_options/build.zig.zon
@@ -0,0 +1,11 @@
+.{
+ .name = .dependency_options,
+ .fingerprint = 0x3e3ce1c1f92ba47e,
+ .version = "0.0.0",
+ .dependencies = .{
+ .other = .{
+ .path = "other",
+ },
+ },
+ .paths = .{""},
+}
diff --git a/test/standalone/dependency_options/other/build.zig b/test/standalone/dependency_options/other/build.zig
new file mode 100644
index 0000000000..c18f92f14d
--- /dev/null
+++ b/test/standalone/dependency_options/other/build.zig
@@ -0,0 +1,59 @@
+const std = @import("std");
+
+pub const Enum = enum { alfa, bravo, charlie };
+
+pub fn build(b: *std.Build) !void {
+ const target = b.standardTargetOptions(.{});
+ const optimize = b.standardOptimizeOption(.{});
+
+ const expected_bool: bool = true;
+ const expected_int: i64 = 123;
+ const expected_float: f64 = 0.5;
+ const expected_string: []const u8 = "abc";
+ const expected_string_list: []const []const u8 = &.{ "a", "b", "c" };
+ const expected_lazy_path: std.Build.LazyPath = .{ .cwd_relative = "abc.txt" };
+ const expected_lazy_path_list: []const std.Build.LazyPath = &.{
+ .{ .cwd_relative = "a.txt" },
+ .{ .cwd_relative = "b.txt" },
+ .{ .cwd_relative = "c.txt" },
+ };
+ const expected_enum: Enum = .alfa;
+ const expected_enum_list: []const Enum = &.{ .alfa, .bravo, .charlie };
+ const expected_build_id: std.zig.BuildId = .uuid;
+ const expected_hex_build_id: std.zig.BuildId = .initHexString("\x12\x34\xcd\xef");
+
+ const @"bool" = b.option(bool, "bool", "bool") orelse expected_bool;
+ const int = b.option(i64, "int", "int") orelse expected_int;
+ const float = b.option(f64, "float", "float") orelse expected_float;
+ const string = b.option([]const u8, "string", "string") orelse expected_string;
+ const string_list = b.option([]const []const u8, "string_list", "string_list") orelse expected_string_list;
+ const lazy_path = b.option(std.Build.LazyPath, "lazy_path", "lazy_path") orelse expected_lazy_path;
+ const lazy_path_list = b.option([]const std.Build.LazyPath, "lazy_path_list", "lazy_path_list") orelse expected_lazy_path_list;
+ const @"enum" = b.option(Enum, "enum", "enum") orelse expected_enum;
+ const enum_list = b.option([]const Enum, "enum_list", "enum_list") orelse expected_enum_list;
+ const build_id = b.option(std.zig.BuildId, "build_id", "build_id") orelse expected_build_id;
+ const hex_build_id = b.option(std.zig.BuildId, "hex_build_id", "hex_build_id") orelse expected_hex_build_id;
+
+ if (@"bool" != expected_bool) return error.TestFailed;
+ if (int != expected_int) return error.TestFailed;
+ if (float != expected_float) return error.TestFailed;
+ if (!std.mem.eql(u8, string, expected_string)) return error.TestFailed;
+ if (string_list.len != expected_string_list.len) return error.TestFailed;
+ for (string_list, expected_string_list) |x, y| {
+ if (!std.mem.eql(u8, x, y)) return error.TestFailed;
+ }
+ if (!std.mem.eql(u8, lazy_path.cwd_relative, expected_lazy_path.cwd_relative)) return error.TestFailed;
+ for (lazy_path_list, expected_lazy_path_list) |x, y| {
+ if (!std.mem.eql(u8, x.cwd_relative, y.cwd_relative)) return error.TestFailed;
+ }
+ if (@"enum" != expected_enum) return error.TestFailed;
+ if (!std.mem.eql(Enum, enum_list, expected_enum_list)) return error.TestFailed;
+ if (!std.meta.eql(build_id, expected_build_id)) return error.TestFailed;
+ if (!hex_build_id.eql(expected_hex_build_id)) return error.TestFailed;
+
+ _ = b.addModule("dummy", .{
+ .root_source_file = b.path("build.zig"),
+ .target = target,
+ .optimize = optimize,
+ });
+}
diff --git a/test/standalone/dependency_options/other/build.zig.zon b/test/standalone/dependency_options/other/build.zig.zon
new file mode 100644
index 0000000000..d49a2cdcf8
--- /dev/null
+++ b/test/standalone/dependency_options/other/build.zig.zon
@@ -0,0 +1,7 @@
+.{
+ .name = .other,
+ .fingerprint = 0xd95835207bc8b630,
+ .version = "0.0.0",
+ .dependencies = .{},
+ .paths = .{""},
+}
diff --git a/test/standalone/extern/build.zig b/test/standalone/extern/build.zig
index 3c22f77f2a..178fa76d41 100644
--- a/test/standalone/extern/build.zig
+++ b/test/standalone/extern/build.zig
@@ -31,8 +31,8 @@ pub fn build(b: *std.Build) void {
.target = b.graph.host,
.optimize = optimize,
}) });
- test_exe.addObject(obj);
- test_exe.linkLibrary(shared);
+ test_exe.root_module.addObject(obj);
+ test_exe.root_module.linkLibrary(shared);
test_step.dependOn(&b.addRunArtifact(test_exe).step);
}
diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig
index 4b0c089f97..0f3f0a16f7 100644
--- a/test/standalone/issue_794/build.zig
+++ b/test/standalone/issue_794/build.zig
@@ -8,7 +8,7 @@ pub fn build(b: *std.Build) void {
.root_source_file = b.path("main.zig"),
.target = b.graph.host,
}) });
- test_artifact.addIncludePath(b.path("a_directory"));
+ test_artifact.root_module.addIncludePath(b.path("a_directory"));
// TODO: actually check the output
_ = test_artifact.getEmittedBin();
diff --git a/test/standalone/stack_iterator/build.zig b/test/standalone/stack_iterator/build.zig
index a036a64ab7..8d2c448215 100644
--- a/test/standalone/stack_iterator/build.zig
+++ b/test/standalone/stack_iterator/build.zig
@@ -65,69 +65,70 @@ pub fn build(b: *std.Build) void {
test_step.dependOn(&run_cmd.step);
}
- // Unwinding through a C shared library without a frame pointer (libc)
- //
- // getcontext version: libc
- //
- // Unwind info type:
- // - ELF: DWARF .eh_frame + .debug_frame
- // - MachO: __unwind_info encodings:
- // - x86_64: STACK_IMMD, STACK_IND
- // - aarch64: FRAMELESS, DWARF
- {
- const c_shared_lib = b.addLibrary(.{
- .linkage = .dynamic,
- .name = "c_shared_lib",
- .root_module = b.createModule(.{
- .root_source_file = null,
- .target = target,
- .optimize = optimize,
- .link_libc = true,
- .strip = false,
- }),
- });
-
- if (target.result.os.tag == .windows)
- c_shared_lib.root_module.addCMacro("LIB_API", "__declspec(dllexport)");
-
- c_shared_lib.root_module.addCSourceFile(.{
- .file = b.path("shared_lib.c"),
- .flags = &.{"-fomit-frame-pointer"},
- });
-
- const exe = b.addExecutable(.{
- .name = "shared_lib_unwind",
- .root_module = b.createModule(.{
- .root_source_file = b.path("shared_lib_unwind.zig"),
- .target = target,
- .optimize = optimize,
- .unwind_tables = if (target.result.os.tag.isDarwin()) .async else null,
- .omit_frame_pointer = true,
- }),
- // zig objcopy doesn't support incremental binaries
- .use_llvm = true,
- });
-
- exe.linkLibrary(c_shared_lib);
-
- const run_cmd = b.addRunArtifact(exe);
- test_step.dependOn(&run_cmd.step);
-
- // Separate debug info ELF file
- if (target.result.ofmt == .elf) {
- const filename = b.fmt("{s}_stripped", .{exe.out_filename});
- const stripped_exe = b.addObjCopy(exe.getEmittedBin(), .{
- .basename = filename, // set the name for the debuglink
- .compress_debug = true,
- .strip = .debug,
- .extract_to_separate_file = true,
- });
-
- const run_stripped = std.Build.Step.Run.create(b, b.fmt("run {s}", .{filename}));
- run_stripped.addFileArg(stripped_exe.getOutput());
- test_step.dependOn(&run_stripped.step);
- }
- }
+ // https://github.com/ziglang/zig/issues/24522
+ //// Unwinding through a C shared library without a frame pointer (libc)
+ ////
+ //// getcontext version: libc
+ ////
+ //// Unwind info type:
+ //// - ELF: DWARF .eh_frame + .debug_frame
+ //// - MachO: __unwind_info encodings:
+ //// - x86_64: STACK_IMMD, STACK_IND
+ //// - aarch64: FRAMELESS, DWARF
+ //{
+ // const c_shared_lib = b.addLibrary(.{
+ // .linkage = .dynamic,
+ // .name = "c_shared_lib",
+ // .root_module = b.createModule(.{
+ // .root_source_file = null,
+ // .target = target,
+ // .optimize = optimize,
+ // .link_libc = true,
+ // .strip = false,
+ // }),
+ // });
+
+ // if (target.result.os.tag == .windows)
+ // c_shared_lib.root_module.addCMacro("LIB_API", "__declspec(dllexport)");
+
+ // c_shared_lib.root_module.addCSourceFile(.{
+ // .file = b.path("shared_lib.c"),
+ // .flags = &.{"-fomit-frame-pointer"},
+ // });
+
+ // const exe = b.addExecutable(.{
+ // .name = "shared_lib_unwind",
+ // .root_module = b.createModule(.{
+ // .root_source_file = b.path("shared_lib_unwind.zig"),
+ // .target = target,
+ // .optimize = optimize,
+ // .unwind_tables = if (target.result.os.tag.isDarwin()) .async else null,
+ // .omit_frame_pointer = true,
+ // }),
+ // // zig objcopy doesn't support incremental binaries
+ // .use_llvm = true,
+ // });
+
+ // exe.root_module.linkLibrary(c_shared_lib);
+
+ // const run_cmd = b.addRunArtifact(exe);
+ // test_step.dependOn(&run_cmd.step);
+
+ // // Separate debug info ELF file
+ // if (target.result.ofmt == .elf) {
+ // const filename = b.fmt("{s}_stripped", .{exe.out_filename});
+ // const stripped_exe = b.addObjCopy(exe.getEmittedBin(), .{
+ // .basename = filename, // set the name for the debuglink
+ // .compress_debug = true,
+ // .strip = .debug,
+ // .extract_to_separate_file = true,
+ // });
+
+ // const run_stripped = std.Build.Step.Run.create(b, b.fmt("run {s}", .{filename}));
+ // run_stripped.addFileArg(stripped_exe.getOutput());
+ // test_step.dependOn(&run_stripped.step);
+ // }
+ //}
// Unwinding without libc/posix
//
diff --git a/test/tests.zig b/test/tests.zig
index db4407172f..a12312d278 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -116,8 +116,6 @@ const test_targets = blk: {
.abi = .eabihf,
},
.link_libc = true,
- // https://github.com/ziglang/zig/issues/23949
- .skip_modules = &.{"std"},
},
.{
@@ -193,6 +191,30 @@ const test_targets = blk: {
.{
.target = .{
+ .cpu_arch = .aarch64,
+ .os_tag = .linux,
+ .abi = .none,
+ },
+ .use_llvm = false,
+ .use_lld = false,
+ .optimize_mode = .ReleaseFast,
+ .strip = true,
+ },
+ .{
+ .target = .{
+ .cpu_arch = .aarch64,
+ .cpu_model = .{ .explicit = &std.Target.aarch64.cpu.neoverse_n1 },
+ .os_tag = .linux,
+ .abi = .none,
+ },
+ .use_llvm = false,
+ .use_lld = false,
+ .optimize_mode = .ReleaseFast,
+ .strip = true,
+ },
+
+ .{
+ .target = .{
.cpu_arch = .aarch64_be,
.os_tag = .linux,
.abi = .none,
@@ -1184,6 +1206,18 @@ const test_targets = blk: {
.{
.target = .{
+ .cpu_arch = .aarch64,
+ .os_tag = .macos,
+ .abi = .none,
+ },
+ .use_llvm = false,
+ .use_lld = false,
+ .optimize_mode = .ReleaseFast,
+ .strip = true,
+ },
+
+ .{
+ .target = .{
.cpu_arch = .x86_64,
.os_tag = .macos,
.abi = .none,
@@ -1335,16 +1369,15 @@ const test_targets = blk: {
// WASI Targets
- // TODO: lowerTry for pointers
- //.{
- // .target = .{
- // .cpu_arch = .wasm32,
- // .os_tag = .wasi,
- // .abi = .none,
- // },
- // .use_llvm = false,
- // .use_lld = false,
- //},
+ .{
+ .target = .{
+ .cpu_arch = .wasm32,
+ .os_tag = .wasi,
+ .abi = .none,
+ },
+ .use_llvm = false,
+ .use_lld = false,
+ },
.{
.target = .{
.cpu_arch = .wasm32,
@@ -1983,6 +2016,16 @@ pub fn addCliTests(b: *std.Build) *Step {
step.dependOn(&cleanup.step);
}
+ {
+ // Test `zig init -m`.
+ const tmp_path = b.makeTempPath();
+ const init_exe = b.addSystemCommand(&.{ b.graph.zig_exe, "init", "-m" });
+ init_exe.setCwd(.{ .cwd_relative = tmp_path });
+ init_exe.setName("zig init -m");
+ init_exe.expectStdOutEqual("");
+ init_exe.expectStdErrEqual("info: successfully populated 'build.zig.zon' and 'build.zig'\n");
+ }
+
// Test Godbolt API
if (builtin.os.tag == .linux and builtin.cpu.arch == .x86_64) {
const tmp_path = b.makeTempPath();
@@ -2260,7 +2303,7 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
continue;
// TODO get compiler-rt tests passing for self-hosted backends.
- if ((target.cpu.arch != .x86_64 or target.ofmt != .elf) and
+ if (((target.cpu.arch != .x86_64 and target.cpu.arch != .aarch64) or target.ofmt == .coff) and
test_target.use_llvm == false and mem.eql(u8, options.name, "compiler-rt"))
continue;
@@ -2328,10 +2371,10 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
} else "";
const use_pic = if (test_target.pic == true) "-pic" else "";
- for (options.include_paths) |include_path| these_tests.addIncludePath(b.path(include_path));
+ for (options.include_paths) |include_path| these_tests.root_module.addIncludePath(b.path(include_path));
if (target.os.tag == .windows) {
- for (options.windows_libs) |lib| these_tests.linkSystemLibrary(lib);
+ for (options.windows_libs) |lib| these_tests.root_module.linkSystemLibrary(lib, .{});
}
const qualified_name = b.fmt("{s}-{s}-{s}-{s}{s}{s}{s}{s}{s}{s}", .{
diff --git a/tools/docgen.zig b/tools/docgen.zig
index 9f98968c9e..3de9da8c88 100644
--- a/tools/docgen.zig
+++ b/tools/docgen.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
const io = std.io;
const fs = std.fs;
const process = std.process;
-const ChildProcess = std.process.Child;
const Progress = std.Progress;
const print = std.debug.print;
const mem = std.mem;
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index ed60d7e67d..9fdd63eda1 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -310,7 +310,8 @@ pub fn main() !void {
build_all_path, libc_so_path, @errorName(err),
});
};
- const header = try elf.Header.parse(elf_bytes[0..@sizeOf(elf.Elf64_Ehdr)]);
+ var stream: std.Io.Reader = .fixed(elf_bytes);
+ const header = try elf.Header.read(&stream);
const parse: Parse = .{
.arena = arena,
diff --git a/tools/incr-check.zig b/tools/incr-check.zig
index 08b8a21e3b..c187c84ae5 100644
--- a/tools/incr-check.zig
+++ b/tools/incr-check.zig
@@ -186,7 +186,7 @@ pub fn main() !void {
try child.spawn();
- var poller = std.io.poll(arena, Eval.StreamEnum, .{
+ var poller = std.Io.poll(arena, Eval.StreamEnum, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
@@ -247,19 +247,15 @@ const Eval = struct {
fn check(eval: *Eval, poller: *Poller, update: Case.Update, prog_node: std.Progress.Node) !void {
const arena = eval.arena;
- const Header = std.zig.Server.Message.Header;
- const stdout = poller.fifo(.stdout);
- const stderr = poller.fifo(.stderr);
+ const stdout = poller.reader(.stdout);
+ const stderr = poller.reader(.stderr);
poll: while (true) {
- while (stdout.readableLength() < @sizeOf(Header)) {
- if (!(try poller.poll())) break :poll;
- }
- const header = stdout.reader().readStruct(Header) catch unreachable;
- while (stdout.readableLength() < header.bytes_len) {
- if (!(try poller.poll())) break :poll;
- }
- const body = stdout.readableSliceOfLen(header.bytes_len);
+ const Header = std.zig.Server.Message.Header;
+ while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+ const header = stdout.takeStruct(Header, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+ const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.error_bundle => {
@@ -277,8 +273,8 @@ const Eval = struct {
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
- if (stderr.readableLength() > 0) {
- const stderr_data = try stderr.toOwnedSlice();
+ if (stderr.bufferedLen() > 0) {
+ const stderr_data = try poller.toOwnedSlice(.stderr);
if (eval.allow_stderr) {
std.log.info("error_bundle included stderr:\n{s}", .{stderr_data});
} else {
@@ -289,15 +285,14 @@ const Eval = struct {
try eval.checkErrorOutcome(update, result_error_bundle);
}
// This message indicates the end of the update.
- stdout.discard(body.len);
return;
},
.emit_digest => {
const EbpHdr = std.zig.Server.Message.EmitDigest;
const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body));
_ = ebp_hdr;
- if (stderr.readableLength() > 0) {
- const stderr_data = try stderr.toOwnedSlice();
+ if (stderr.bufferedLen() > 0) {
+ const stderr_data = try poller.toOwnedSlice(.stderr);
if (eval.allow_stderr) {
std.log.info("emit_digest included stderr:\n{s}", .{stderr_data});
} else {
@@ -308,7 +303,6 @@ const Eval = struct {
if (eval.target.backend == .sema) {
try eval.checkSuccessOutcome(update, null, prog_node);
// This message indicates the end of the update.
- stdout.discard(body.len);
}
const digest = body[@sizeOf(EbpHdr)..][0..Cache.bin_digest_len];
@@ -323,21 +317,18 @@ const Eval = struct {
try eval.checkSuccessOutcome(update, bin_path, prog_node);
// This message indicates the end of the update.
- stdout.discard(body.len);
},
else => {
// Ignore other messages.
- stdout.discard(body.len);
},
}
}
- if (stderr.readableLength() > 0) {
- const stderr_data = try stderr.toOwnedSlice();
+ if (stderr.bufferedLen() > 0) {
if (eval.allow_stderr) {
- std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr_data });
+ std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr.buffered() });
} else {
- eval.fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
+ eval.fatal("update '{s}' failed:\n{s}", .{ update.name, stderr.buffered() });
}
}
@@ -537,25 +528,19 @@ const Eval = struct {
fn end(eval: *Eval, poller: *Poller) !void {
requestExit(eval.child, eval);
- const Header = std.zig.Server.Message.Header;
- const stdout = poller.fifo(.stdout);
- const stderr = poller.fifo(.stderr);
+ const stdout = poller.reader(.stdout);
+ const stderr = poller.reader(.stderr);
poll: while (true) {
- while (stdout.readableLength() < @sizeOf(Header)) {
- if (!(try poller.poll())) break :poll;
- }
- const header = stdout.reader().readStruct(Header) catch unreachable;
- while (stdout.readableLength() < header.bytes_len) {
- if (!(try poller.poll())) break :poll;
- }
- const body = stdout.readableSliceOfLen(header.bytes_len);
- stdout.discard(body.len);
+ const Header = std.zig.Server.Message.Header;
+ while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+ const header = stdout.takeStruct(Header, .little) catch unreachable;
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+ stdout.toss(header.bytes_len);
}
- if (stderr.readableLength() > 0) {
- const stderr_data = try stderr.toOwnedSlice();
- eval.fatal("unexpected stderr:\n{s}", .{stderr_data});
+ if (stderr.bufferedLen() > 0) {
+ eval.fatal("unexpected stderr:\n{s}", .{stderr.buffered()});
}
}