| author    | Andrew Kelley <andrew@ziglang.org> | 2021-10-02 10:45:56 -0700 |
|-----------|------------------------------------|---------------------------|
| committer | Andrew Kelley <andrew@ziglang.org> | 2021-10-02 10:45:56 -0700 |
| commit    | dde0adcb363f3a3f306c0fc9eaec511cc3b74965 | |
| tree      | 9388d039a0b77211936c7264f5a3179f63ad51e6 /lib/libunwind/src | |
| parent    | c4cd592f0e1eeff5a4056796610d97010ae4e38c | |
| parent    | 7a2624c3e40e2386a4a8a775b839e1d67608ec42 | |
| download  | zig-dde0adcb363f3a3f306c0fc9eaec511cc3b74965.tar.gz, zig-dde0adcb363f3a3f306c0fc9eaec511cc3b74965.zip | |
Merge branch 'llvm13'
Diffstat (limited to 'lib/libunwind/src')
| mode       | file                                       | lines changed |
|------------|--------------------------------------------|---------------|
| -rw-r--r-- | lib/libunwind/src/DwarfInstructions.hpp    | 17  |
| -rw-r--r-- | lib/libunwind/src/Registers.hpp            | 123 |
| -rw-r--r-- | lib/libunwind/src/Unwind-EHABI.cpp         | 2   |
| -rw-r--r-- | lib/libunwind/src/UnwindCursor.hpp         | 16  |
| -rw-r--r-- | lib/libunwind/src/UnwindRegistersRestore.S | 384 |
| -rw-r--r-- | lib/libunwind/src/UnwindRegistersSave.S    | 420 |
| -rw-r--r-- | lib/libunwind/src/assembly.h               | 58  |
| -rw-r--r-- | lib/libunwind/src/config.h                 | 11  |
| -rw-r--r-- | lib/libunwind/src/libunwind.cpp            | 15  |
9 files changed, 589 insertions, 457 deletions
diff --git a/lib/libunwind/src/DwarfInstructions.hpp b/lib/libunwind/src/DwarfInstructions.hpp
index c39cabe1f7..686c6be0d8 100644
--- a/lib/libunwind/src/DwarfInstructions.hpp
+++ b/lib/libunwind/src/DwarfInstructions.hpp
@@ -167,6 +167,16 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pint_t pc,
       // restore registers that DWARF says were saved
       R newRegisters = registers;
+
+      // Typically, the CFA is the stack pointer at the call site in
+      // the previous frame. However, there are scenarios in which this is not
+      // true. For example, if we switched to a new stack. In that case, the
+      // value of the previous SP might be indicated by a CFI directive.
+      //
+      // We set the SP here to the CFA, allowing for it to be overridden
+      // by a CFI directive later on.
+      newRegisters.setSP(cfa);
+
       pint_t returnAddress = 0;
       const int lastReg = R::lastDwarfRegNum();
       assert(static_cast<int>(CFI_Parser<A>::kMaxRegisterNumber) >= lastReg &&
@@ -200,10 +210,6 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pint_t pc,
         }
       }
 
-      // By definition, the CFA is the stack pointer at the call site, so
-      // restoring SP means setting it to CFA.
-      newRegisters.setSP(cfa);
-
       isSignalFrame = cieInfo.isSignalFrame;
 
 #if defined(_LIBUNWIND_TARGET_AARCH64)
@@ -213,7 +219,8 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pint_t pc,
       // restored. autia1716 is used instead of autia as autia1716 assembles
       // to a NOP on pre-v8.3a architectures.
       if ((R::getArch() == REGISTERS_ARM64) &&
-          prolog.savedRegisters[UNW_ARM64_RA_SIGN_STATE].value) {
+          prolog.savedRegisters[UNW_ARM64_RA_SIGN_STATE].value &&
+          returnAddress != 0) {
 #if !defined(_LIBUNWIND_IS_NATIVE_ONLY)
         return UNW_ECROSSRASIGNING;
 #else
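The reordering above matters when unwinding across a stack switch: seeding SP with the CFA *before* the saved-register loop lets an explicit DWARF rule for the SP column overwrite it, whereas the old code unconditionally clobbered SP after the loop. A minimal sketch of that evaluation order, with made-up types and helpers (`FrameRules`, `restoreSp` are illustrative, not libunwind API):

```cpp
#include <cstdint>
#include <map>

// A frame's DWARF rules, modeled as "register N was saved at CFA + offset".
struct FrameRules {
  std::map<int, int64_t> savedAtCfaOffset; // dwarf reg -> offset from CFA
};

constexpr int kSpDwarfReg = 31; // stand-in DWARF column number for SP

uint64_t restoreSp(uint64_t cfa, const FrameRules &rules,
                   uint64_t (*load)(uint64_t addr)) {
  uint64_t sp = cfa; // default: the CFA is the caller's SP at the call site
  auto it = rules.savedAtCfaOffset.find(kSpDwarfReg);
  if (it != rules.savedAtCfaOffset.end())
    sp = load(cfa + it->second); // an explicit CFI rule for SP overrides it
  return sp;
}

int main() {
  FrameRules rules;
  rules.savedAtCfaOffset[kSpDwarfReg] = -8; // frame saved the caller's SP
  auto load = [](uint64_t) -> uint64_t { return 0x7fff0000; };
  return restoreSp(0x1000, rules, load) == 0x7fff0000 ? 0 : 1;
}
```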
return "x27"; case UNW_ARM64_X28: return "x28"; - case UNW_ARM64_X29: + case UNW_ARM64_FP: return "fp"; - case UNW_ARM64_X30: + case UNW_ARM64_LR: return "lr"; - case UNW_ARM64_X31: + case UNW_ARM64_SP: return "sp"; + case UNW_ARM64_PC: + return "pc"; case UNW_ARM64_D0: return "d0"; case UNW_ARM64_D1: @@ -3718,19 +3728,51 @@ inline const char *Registers_hexagon::getRegisterName(int regNum) { #if defined(_LIBUNWIND_TARGET_RISCV) -/// Registers_riscv holds the register state of a thread in a 64-bit RISC-V +/// Registers_riscv holds the register state of a thread in a RISC-V /// process. + +// This check makes it safe when LIBUNWIND_ENABLE_CROSS_UNWINDING enabled. +# ifdef __riscv +# if __riscv_xlen == 32 +typedef uint32_t reg_t; +# elif __riscv_xlen == 64 +typedef uint64_t reg_t; +# else +# error "Unsupported __riscv_xlen" +# endif + +# if defined(__riscv_flen) +# if __riscv_flen == 64 +typedef double fp_t; +# elif __riscv_flen == 32 +typedef float fp_t; +# else +# error "Unsupported __riscv_flen" +# endif +# else +// This is just for supressing undeclared error of fp_t. +typedef double fp_t; +# endif +# else +// Use Max possible width when cross unwinding +typedef uint64_t reg_t; +typedef double fp_t; +# define __riscv_xlen 64 +# define __riscv_flen 64 +#endif + +/// Registers_riscv holds the register state of a thread. class _LIBUNWIND_HIDDEN Registers_riscv { public: Registers_riscv(); Registers_riscv(const void *registers); bool validRegister(int num) const; - uint64_t getRegister(int num) const; - void setRegister(int num, uint64_t value); + reg_t getRegister(int num) const; + void setRegister(int num, reg_t value); bool validFloatRegister(int num) const; - double getFloatRegister(int num) const; - void setFloatRegister(int num, double value); + fp_t getFloatRegister(int num) const; + void setFloatRegister(int num, fp_t value); bool validVectorRegister(int num) const; v128 getVectorRegister(int num) const; void setVectorRegister(int num, v128 value); @@ -3739,31 +3781,45 @@ public: static int lastDwarfRegNum() { return _LIBUNWIND_HIGHEST_DWARF_REGISTER_RISCV; } static int getArch() { return REGISTERS_RISCV; } - uint64_t getSP() const { return _registers[2]; } - void setSP(uint64_t value) { _registers[2] = value; } - uint64_t getIP() const { return _registers[0]; } - void setIP(uint64_t value) { _registers[0] = value; } + reg_t getSP() const { return _registers[2]; } + void setSP(reg_t value) { _registers[2] = value; } + reg_t getIP() const { return _registers[0]; } + void setIP(reg_t value) { _registers[0] = value; } private: // _registers[0] holds the pc - uint64_t _registers[32]; - double _floats[32]; + reg_t _registers[32]; +# if defined(__riscv_flen) + fp_t _floats[32]; +# endif }; inline Registers_riscv::Registers_riscv(const void *registers) { static_assert((check_fit<Registers_riscv, unw_context_t>::does_fit), "riscv registers do not fit into unw_context_t"); memcpy(&_registers, registers, sizeof(_registers)); +# if __riscv_xlen == 32 + static_assert(sizeof(_registers) == 0x80, + "expected float registers to be at offset 128"); +# elif __riscv_xlen == 64 static_assert(sizeof(_registers) == 0x100, "expected float registers to be at offset 256"); +# else +# error "Unexpected float registers." 
diff --git a/lib/libunwind/src/Unwind-EHABI.cpp b/lib/libunwind/src/Unwind-EHABI.cpp
index 32b5cbc3be..8843db7f54 100644
--- a/lib/libunwind/src/Unwind-EHABI.cpp
+++ b/lib/libunwind/src/Unwind-EHABI.cpp
@@ -97,9 +97,11 @@ _Unwind_Reason_Code ProcessDescriptors(
     case Descriptor::LU32:
       descriptor = getNextWord(descriptor, &length);
       descriptor = getNextWord(descriptor, &offset);
+      break;
     case Descriptor::LU16:
       descriptor = getNextNibble(descriptor, &length);
       descriptor = getNextNibble(descriptor, &offset);
+      break;
 
     default:
       assert(false);
       return _URC_FAILURE;
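The two added `break` statements fix an unintended fallthrough: an LU32 descriptor would also run the LU16 parsing arm and then fall into `default`, failing the unwind. A minimal reproduction of the bug class (hypothetical values, not the EHABI descriptor format):

```cpp
#include <cassert>

enum class Kind { Word, Nibble };

int parse(Kind k) {
  int consumed = 0;
  switch (k) {
  case Kind::Word:
    consumed += 4; // getNextWord twice in the real code
    consumed += 4;
    break;         // without this, Word also executes the Nibble arm
  case Kind::Nibble:
    consumed += 2;
    consumed += 2;
    break;
  }
  return consumed;
}

int main() {
  assert(parse(Kind::Word) == 8); // would be 12 with the fallthrough bug
  return 0;
}
```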
" + "funcStart=0x%llX", (uint64_t) pc, (uint64_t) funcStart); return false; } if (pc > funcEnd) { - _LIBUNWIND_DEBUG_LOG("malformed __unwind_info, pc=0x%llX not in second " - "level compressed unwind table. funcEnd=0x%llX", + _LIBUNWIND_DEBUG_LOG("malformed __unwind_info, pc=0x%llX " + "not in second level compressed unwind table. " + "funcEnd=0x%llX", (uint64_t) pc, (uint64_t) funcEnd); return false; } @@ -1764,9 +1766,9 @@ bool UnwindCursor<A, R>::getInfoFromCompactEncodingSection(pint_t pc, pageEncodingIndex * sizeof(uint32_t)); } } else { - _LIBUNWIND_DEBUG_LOG("malformed __unwind_info at 0x%0llX bad second " - "level page", - (uint64_t) sects.compact_unwind_section); + _LIBUNWIND_DEBUG_LOG( + "malformed __unwind_info at 0x%0llX bad second level page", + (uint64_t)sects.compact_unwind_section); return false; } diff --git a/lib/libunwind/src/UnwindRegistersRestore.S b/lib/libunwind/src/UnwindRegistersRestore.S index 289afe98b0..d8bf1adee4 100644 --- a/lib/libunwind/src/UnwindRegistersRestore.S +++ b/lib/libunwind/src/UnwindRegistersRestore.S @@ -134,7 +134,7 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv) // load register (GPR) #define PPC64_LR(n) \ - ld %r##n, (8 * (n + 2))(%r3) + ld n, (8 * (n + 2))(3) // restore integral registers // skip r0 for now @@ -176,12 +176,12 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv) // (note that this also restores floating point registers and V registers, // because part of VS is mapped to these registers) - addi %r4, %r3, PPC64_OFFS_FP + addi 4, 3, PPC64_OFFS_FP // load VS register #define PPC64_LVS(n) \ - lxvd2x %vs##n, 0, %r4 ;\ - addi %r4, %r4, 16 + lxvd2x n, 0, 4 ;\ + addi 4, 4, 16 // restore the first 32 VS regs (and also all floating point regs) PPC64_LVS(0) @@ -220,23 +220,23 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv) // use VRSAVE to conditionally restore the remaining VS regs, // that are where the V regs are mapped - ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave - cmpwi %r5, 0 + ld 5, PPC64_OFFS_VRSAVE(3) // test VRsave + cmpwi 5, 0 beq Lnovec // conditionally load VS #define PPC64_CLVS_BOTTOM(n) \ beq Ldone##n ;\ - addi %r4, %r3, PPC64_OFFS_FP + n * 16 ;\ - lxvd2x %vs##n, 0, %r4 ;\ + addi 4, 3, PPC64_OFFS_FP + n * 16 ;\ + lxvd2x n, 0, 4 ;\ Ldone##n: -#define PPC64_CLVSl(n) \ - andis. %r0, %r5, (1<<(47-n)) ;\ +#define PPC64_CLVSl(n) \ + andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n)) ;\ PPC64_CLVS_BOTTOM(n) -#define PPC64_CLVSh(n) \ - andi. %r0, %r5, (1<<(63-n)) ;\ +#define PPC64_CLVSh(n) \ + andi. 
diff --git a/lib/libunwind/src/UnwindRegistersRestore.S b/lib/libunwind/src/UnwindRegistersRestore.S
index 289afe98b0..d8bf1adee4 100644
--- a/lib/libunwind/src/UnwindRegistersRestore.S
+++ b/lib/libunwind/src/UnwindRegistersRestore.S
@@ -134,7 +134,7 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
 
 // load register (GPR)
 #define PPC64_LR(n) \
-  ld    %r##n, (8 * (n + 2))(%r3)
+  ld    n, (8 * (n + 2))(3)
 
 // restore integral registers
 // skip r0 for now
@@ -176,12 +176,12 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
 // (note that this also restores floating point registers and V registers,
 // because part of VS is mapped to these registers)
 
-  addi  %r4, %r3, PPC64_OFFS_FP
+  addi  4, 3, PPC64_OFFS_FP
 
 // load VS register
 #define PPC64_LVS(n)       \
-  lxvd2x  %vs##n, 0, %r4  ;\
-  addi    %r4, %r4, 16
+  lxvd2x  n, 0, 4         ;\
+  addi    4, 4, 16
 
 // restore the first 32 VS regs (and also all floating point regs)
   PPC64_LVS(0)
@@ -220,23 +220,23 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
 
 // use VRSAVE to conditionally restore the remaining VS regs,
 // that are where the V regs are mapped
-  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
-  cmpwi %r5, 0
+  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
+  cmpwi 5, 0
   beq   Lnovec
 
 // conditionally load VS
 #define PPC64_CLVS_BOTTOM(n)               \
   beq    Ldone##n                         ;\
-  addi   %r4, %r3, PPC64_OFFS_FP + n * 16 ;\
-  lxvd2x %vs##n, 0, %r4                   ;\
+  addi   4, 3, PPC64_OFFS_FP + n * 16     ;\
+  lxvd2x n, 0, 4                          ;\
 Ldone##n:
 
-#define PPC64_CLVSl(n)           \
-  andis. %r0, %r5, (1<<(47-n))  ;\
+#define PPC64_CLVSl(n)                     \
+  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))   ;\
 PPC64_CLVS_BOTTOM(n)
 
-#define PPC64_CLVSh(n)           \
-  andi.  %r0, %r5, (1<<(63-n))  ;\
+#define PPC64_CLVSh(n)                     \
+  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))   ;\
 PPC64_CLVS_BOTTOM(n)
 
   PPC64_CLVSl(32)
@@ -276,7 +276,7 @@ PPC64_CLVS_BOTTOM(n)
 
 // load FP register
 #define PPC64_LF(n) \
-  lfd   %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
+  lfd   n, (PPC64_OFFS_FP + n * 16)(3)
 
 // restore float registers
   PPC64_LF(0)
@@ -314,30 +314,30 @@ PPC64_CLVS_BOTTOM(n)
 
 #if defined(__ALTIVEC__)
 // restore vector registers if any are in use
-  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
-  cmpwi %r5, 0
+  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
+  cmpwi 5, 0
   beq   Lnovec
 
-  subi  %r4, %r1, 16
+  subi  4, 1, 16
   // r4 is now a 16-byte aligned pointer into the red zone
   // the _vectorScalarRegisters may not be 16-byte aligned
   // so copy via red zone temp buffer
 
#define PPC64_CLV_UNALIGNED_BOTTOM(n)            \
   beq    Ldone##n                               ;\
-  ld     %r0, (PPC64_OFFS_V + n * 16)(%r3)      ;\
-  std    %r0, 0(%r4)                            ;\
-  ld     %r0, (PPC64_OFFS_V + n * 16 + 8)(%r3)  ;\
-  std    %r0, 8(%r4)                            ;\
-  lvx    %v##n, 0, %r4                          ;\
+  ld     0, (PPC64_OFFS_V + n * 16)(3)          ;\
+  std    0, 0(4)                                ;\
+  ld     0, (PPC64_OFFS_V + n * 16 + 8)(3)      ;\
+  std    0, 8(4)                                ;\
+  lvx    n, 0, 4                                ;\
 Ldone ## n:
 
-#define PPC64_CLV_UNALIGNEDl(n)  \
-  andis. %r0, %r5, (1<<(15-n))  ;\
+#define PPC64_CLV_UNALIGNEDl(n)            \
+  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))   ;\
 PPC64_CLV_UNALIGNED_BOTTOM(n)
 
-#define PPC64_CLV_UNALIGNEDh(n)  \
-  andi.  %r0, %r5, (1<<(31-n))  ;\
+#define PPC64_CLV_UNALIGNEDh(n)            \
+  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))   ;\
 PPC64_CLV_UNALIGNED_BOTTOM(n)
 
   PPC64_CLV_UNALIGNEDl(0)
@@ -377,10 +377,10 @@ PPC64_CLV_UNALIGNED_BOTTOM(n)
 #endif
 
 Lnovec:
-  ld    %r0, PPC64_OFFS_CR(%r3)
-  mtcr  %r0
-  ld    %r0, PPC64_OFFS_SRR0(%r3)
-  mtctr %r0
+  ld    0, PPC64_OFFS_CR(3)
+  mtcr  0
+  ld    0, PPC64_OFFS_SRR0(3)
+  mtctr 0
 
   PPC64_LR(0)
   PPC64_LR(5)
@@ -402,111 +402,111 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
 
 // restore integral registerrs
 // skip r0 for now
 // skip r1 for now
-  lwz     %r2,  16(%r3)
+  lwz     2,  16(3)
 // skip r3 for now
 // skip r4 for now
 // skip r5 for now
-  lwz     %r6,  32(%r3)
-  lwz     %r7,  36(%r3)
-  lwz     %r8,  40(%r3)
-  lwz     %r9,  44(%r3)
-  lwz     %r10, 48(%r3)
-  lwz     %r11, 52(%r3)
-  lwz     %r12, 56(%r3)
-  lwz     %r13, 60(%r3)
-  lwz     %r14, 64(%r3)
-  lwz     %r15, 68(%r3)
-  lwz     %r16, 72(%r3)
-  lwz     %r17, 76(%r3)
-  lwz     %r18, 80(%r3)
-  lwz     %r19, 84(%r3)
-  lwz     %r20, 88(%r3)
-  lwz     %r21, 92(%r3)
-  lwz     %r22, 96(%r3)
-  lwz     %r23,100(%r3)
-  lwz     %r24,104(%r3)
-  lwz     %r25,108(%r3)
-  lwz     %r26,112(%r3)
-  lwz     %r27,116(%r3)
-  lwz     %r28,120(%r3)
-  lwz     %r29,124(%r3)
-  lwz     %r30,128(%r3)
-  lwz     %r31,132(%r3)
+  lwz     6,  32(3)
+  lwz     7,  36(3)
+  lwz     8,  40(3)
+  lwz     9,  44(3)
+  lwz     10, 48(3)
+  lwz     11, 52(3)
+  lwz     12, 56(3)
+  lwz     13, 60(3)
+  lwz     14, 64(3)
+  lwz     15, 68(3)
+  lwz     16, 72(3)
+  lwz     17, 76(3)
+  lwz     18, 80(3)
+  lwz     19, 84(3)
+  lwz     20, 88(3)
+  lwz     21, 92(3)
+  lwz     22, 96(3)
+  lwz     23,100(3)
+  lwz     24,104(3)
+  lwz     25,108(3)
+  lwz     26,112(3)
+  lwz     27,116(3)
+  lwz     28,120(3)
+  lwz     29,124(3)
+  lwz     30,128(3)
+  lwz     31,132(3)
 
 #ifndef __NO_FPRS__
 // restore float registers
-  lfd     %f0, 160(%r3)
-  lfd     %f1, 168(%r3)
-  lfd     %f2, 176(%r3)
-  lfd     %f3, 184(%r3)
-  lfd     %f4, 192(%r3)
-  lfd     %f5, 200(%r3)
-  lfd     %f6, 208(%r3)
-  lfd     %f7, 216(%r3)
-  lfd     %f8, 224(%r3)
-  lfd     %f9, 232(%r3)
-  lfd     %f10,240(%r3)
-  lfd     %f11,248(%r3)
-  lfd     %f12,256(%r3)
-  lfd     %f13,264(%r3)
-  lfd     %f14,272(%r3)
-  lfd     %f15,280(%r3)
-  lfd     %f16,288(%r3)
-  lfd     %f17,296(%r3)
-  lfd     %f18,304(%r3)
-  lfd     %f19,312(%r3)
-  lfd     %f20,320(%r3)
-  lfd     %f21,328(%r3)
-  lfd     %f22,336(%r3)
-  lfd     %f23,344(%r3)
-  lfd     %f24,352(%r3)
-  lfd     %f25,360(%r3)
-  lfd     %f26,368(%r3)
-  lfd     %f27,376(%r3)
-  lfd     %f28,384(%r3)
-  lfd     %f29,392(%r3)
-  lfd     %f30,400(%r3)
-  lfd     %f31,408(%r3)
+  lfd     0, 160(3)
+  lfd     1, 168(3)
+  lfd     2, 176(3)
+  lfd     3, 184(3)
+  lfd     4, 192(3)
+  lfd     5, 200(3)
+  lfd     6, 208(3)
+  lfd     7, 216(3)
+  lfd     8, 224(3)
+  lfd     9, 232(3)
+  lfd     10,240(3)
+  lfd     11,248(3)
+  lfd     12,256(3)
+  lfd     13,264(3)
+  lfd     14,272(3)
+  lfd     15,280(3)
+  lfd     16,288(3)
+  lfd     17,296(3)
+  lfd     18,304(3)
+  lfd     19,312(3)
+  lfd     20,320(3)
+  lfd     21,328(3)
+  lfd     22,336(3)
+  lfd     23,344(3)
+  lfd     24,352(3)
+  lfd     25,360(3)
+  lfd     26,368(3)
+  lfd     27,376(3)
+  lfd     28,384(3)
+  lfd     29,392(3)
+  lfd     30,400(3)
+  lfd     31,408(3)
 #endif
 
 #if defined(__ALTIVEC__)
 // restore vector registers if any are in use
-  lwz     %r5, 156(%r3)       // test VRsave
-  cmpwi   %r5, 0
+  lwz     5, 156(3)       // test VRsave
+  cmpwi   5, 0
   beq     Lnovec
 
-  subi    %r4, %r1, 16
-  rlwinm  %r4, %r4, 0, 0, 27  // mask low 4-bits
+  subi    4, 1, 16
+  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
   // r4 is now a 16-byte aligned pointer into the red zone
   // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
-
-#define LOAD_VECTOR_UNALIGNEDl(_index)          \
-  andis.  %r0, %r5, (1<<(15-_index))  SEPARATOR \
+
+#define LOAD_VECTOR_UNALIGNEDl(_index)                    \
+  andis.  0, 5, (1 PPC_LEFT_SHIFT(15-_index))   SEPARATOR \
   beq     Ldone ## _index             SEPARATOR \
-  lwz     %r0, 424+_index*16(%r3)     SEPARATOR \
-  stw     %r0, 0(%r4)                 SEPARATOR \
-  lwz     %r0, 424+_index*16+4(%r3)   SEPARATOR \
-  stw     %r0, 4(%r4)                 SEPARATOR \
-  lwz     %r0, 424+_index*16+8(%r3)   SEPARATOR \
-  stw     %r0, 8(%r4)                 SEPARATOR \
-  lwz     %r0, 424+_index*16+12(%r3)  SEPARATOR \
-  stw     %r0, 12(%r4)                SEPARATOR \
-  lvx     %v ## _index, 0, %r4        SEPARATOR \
+  lwz     0, 424+_index*16(3)         SEPARATOR \
+  stw     0, 0(4)                     SEPARATOR \
+  lwz     0, 424+_index*16+4(3)       SEPARATOR \
+  stw     0, 4(4)                     SEPARATOR \
+  lwz     0, 424+_index*16+8(3)       SEPARATOR \
+  stw     0, 8(4)                     SEPARATOR \
+  lwz     0, 424+_index*16+12(3)      SEPARATOR \
+  stw     0, 12(4)                    SEPARATOR \
+  lvx     _index, 0, 4                SEPARATOR \
 Ldone ## _index:
 
-#define LOAD_VECTOR_UNALIGNEDh(_index)          \
-  andi.   %r0, %r5, (1<<(31-_index))  SEPARATOR \
+#define LOAD_VECTOR_UNALIGNEDh(_index)                    \
+  andi.   0, 5, (1 PPC_LEFT_SHIFT(31-_index))   SEPARATOR \
   beq     Ldone ## _index             SEPARATOR \
-  lwz     %r0, 424+_index*16(%r3)     SEPARATOR \
-  stw     %r0, 0(%r4)                 SEPARATOR \
-  lwz     %r0, 424+_index*16+4(%r3)   SEPARATOR \
-  stw     %r0, 4(%r4)                 SEPARATOR \
-  lwz     %r0, 424+_index*16+8(%r3)   SEPARATOR \
-  stw     %r0, 8(%r4)                 SEPARATOR \
-  lwz     %r0, 424+_index*16+12(%r3)  SEPARATOR \
-  stw     %r0, 12(%r4)                SEPARATOR \
-  lvx     %v ## _index, 0, %r4        SEPARATOR \
+  lwz     0, 424+_index*16(3)         SEPARATOR \
+  stw     0, 0(4)                     SEPARATOR \
+  lwz     0, 424+_index*16+4(3)       SEPARATOR \
+  stw     0, 4(4)                     SEPARATOR \
+  lwz     0, 424+_index*16+8(3)       SEPARATOR \
+  stw     0, 8(4)                     SEPARATOR \
+  lwz     0, 424+_index*16+12(3)      SEPARATOR \
+  stw     0, 12(4)                    SEPARATOR \
+  lvx     _index, 0, 4                SEPARATOR \
 Ldone ## _index:
@@ -545,17 +545,17 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
 #endif
 
 Lnovec:
-  lwz     %r0, 136(%r3)   // __cr
-  mtcr    %r0
-  lwz     %r0, 148(%r3)   // __ctr
-  mtctr   %r0
-  lwz     %r0, 0(%r3)     // __ssr0
-  mtctr   %r0
-  lwz     %r0, 8(%r3)     // do r0 now
-  lwz     %r5, 28(%r3)    // do r5 now
-  lwz     %r4, 24(%r3)    // do r4 now
-  lwz     %r1, 12(%r3)    // do sp now
-  lwz     %r3, 20(%r3)    // do r3 last
+  lwz     0, 136(3)   // __cr
+  mtcr    0
+  lwz     0, 148(3)   // __ctr
+  mtctr   0
+  lwz     0, 0(3)     // __ssr0
+  mtctr   0
+  lwz     0, 8(3)     // do r0 now
+  lwz     5, 28(3)    // do r5 now
+  lwz     4, 24(3)    // do r4 now
+  lwz     1, 12(3)    // do sp now
+  lwz     3, 20(3)    // do r3 last
   bctr
 
 #elif defined(__aarch64__)
@@ -1072,7 +1072,7 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
   jmp %o7
    nop
 
-#elif defined(__riscv) && __riscv_xlen == 64
+#elif defined(__riscv)
 
 //
 // void libunwind::Registers_riscv::jumpto()
@@ -1082,74 +1082,74 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
 //
   .p2align 2
 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
-#if defined(__riscv_flen) && __riscv_flen == 64
-  fld    f0, (8 * 32 + 8 * 0)(a0)
-  fld    f1, (8 * 32 + 8 * 1)(a0)
-  fld    f2, (8 * 32 + 8 * 2)(a0)
-  fld    f3, (8 * 32 + 8 * 3)(a0)
-  fld    f4, (8 * 32 + 8 * 4)(a0)
-  fld    f5, (8 * 32 + 8 * 5)(a0)
-  fld    f6, (8 * 32 + 8 * 6)(a0)
-  fld    f7, (8 * 32 + 8 * 7)(a0)
-  fld    f8, (8 * 32 + 8 * 8)(a0)
-  fld    f9, (8 * 32 + 8 * 9)(a0)
-  fld    f10, (8 * 32 + 8 * 10)(a0)
-  fld    f11, (8 * 32 + 8 * 11)(a0)
-  fld    f12, (8 * 32 + 8 * 12)(a0)
-  fld    f13, (8 * 32 + 8 * 13)(a0)
-  fld    f14, (8 * 32 + 8 * 14)(a0)
-  fld    f15, (8 * 32 + 8 * 15)(a0)
-  fld    f16, (8 * 32 + 8 * 16)(a0)
-  fld    f17, (8 * 32 + 8 * 17)(a0)
-  fld    f18, (8 * 32 + 8 * 18)(a0)
-  fld    f19, (8 * 32 + 8 * 19)(a0)
-  fld    f20, (8 * 32 + 8 * 20)(a0)
-  fld    f21, (8 * 32 + 8 * 21)(a0)
-  fld    f22, (8 * 32 + 8 * 22)(a0)
-  fld    f23, (8 * 32 + 8 * 23)(a0)
-  fld    f24, (8 * 32 + 8 * 24)(a0)
-  fld    f25, (8 * 32 + 8 * 25)(a0)
-  fld    f26, (8 * 32 + 8 * 26)(a0)
-  fld    f27, (8 * 32 + 8 * 27)(a0)
-  fld    f28, (8 * 32 + 8 * 28)(a0)
-  fld    f29, (8 * 32 + 8 * 29)(a0)
-  fld    f30, (8 * 32 + 8 * 30)(a0)
-  fld    f31, (8 * 32 + 8 * 31)(a0)
-#endif
+# if defined(__riscv_flen)
+  FLOAD  f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
+  FLOAD  f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
+  FLOAD  f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
+  FLOAD  f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
+  FLOAD  f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
+  FLOAD  f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
+  FLOAD  f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
+  FLOAD  f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
+  FLOAD  f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
+  FLOAD  f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
+  FLOAD  f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
+  FLOAD  f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
+  FLOAD  f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
+  FLOAD  f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
+  FLOAD  f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
+  FLOAD  f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
+  FLOAD  f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
+  FLOAD  f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
+  FLOAD  f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
+  FLOAD  f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
+  FLOAD  f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
+  FLOAD  f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
+  FLOAD  f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
+  FLOAD  f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
+  FLOAD  f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
+  FLOAD  f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
+  FLOAD  f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
+  FLOAD  f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
+  FLOAD  f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
+  FLOAD  f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
+  FLOAD  f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
+  FLOAD  f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
+# endif
 
   // x0 is zero
-  ld    x1, (8 * 0)(a0) // restore pc into ra
-  ld    x2, (8 * 2)(a0)
-  ld    x3, (8 * 3)(a0)
-  ld    x4, (8 * 4)(a0)
-  ld    x5, (8 * 5)(a0)
-  ld    x6, (8 * 6)(a0)
-  ld    x7, (8 * 7)(a0)
-  ld    x8, (8 * 8)(a0)
-  ld    x9, (8 * 9)(a0)
+  ILOAD  x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
+  ILOAD  x2, (RISCV_ISIZE * 2)(a0)
+  ILOAD  x3, (RISCV_ISIZE * 3)(a0)
+  ILOAD  x4, (RISCV_ISIZE * 4)(a0)
+  ILOAD  x5, (RISCV_ISIZE * 5)(a0)
+  ILOAD  x6, (RISCV_ISIZE * 6)(a0)
+  ILOAD  x7, (RISCV_ISIZE * 7)(a0)
+  ILOAD  x8, (RISCV_ISIZE * 8)(a0)
+  ILOAD  x9, (RISCV_ISIZE * 9)(a0)
   // skip a0 for now
-  ld    x11, (8 * 11)(a0)
-  ld    x12, (8 * 12)(a0)
-  ld    x13, (8 * 13)(a0)
-  ld    x14, (8 * 14)(a0)
-  ld    x15, (8 * 15)(a0)
-  ld    x16, (8 * 16)(a0)
-  ld    x17, (8 * 17)(a0)
-  ld    x18, (8 * 18)(a0)
-  ld    x19, (8 * 19)(a0)
-  ld    x20, (8 * 20)(a0)
-  ld    x21, (8 * 21)(a0)
-  ld    x22, (8 * 22)(a0)
-  ld    x23, (8 * 23)(a0)
-  ld    x24, (8 * 24)(a0)
-  ld    x25, (8 * 25)(a0)
-  ld    x26, (8 * 26)(a0)
-  ld    x27, (8 * 27)(a0)
-  ld    x28, (8 * 28)(a0)
-  ld    x29, (8 * 29)(a0)
-  ld    x30, (8 * 30)(a0)
-  ld    x31, (8 * 31)(a0)
-  ld    x10, (8 * 10)(a0)   // restore a0
+  ILOAD  x11, (RISCV_ISIZE * 11)(a0)
+  ILOAD  x12, (RISCV_ISIZE * 12)(a0)
+  ILOAD  x13, (RISCV_ISIZE * 13)(a0)
+  ILOAD  x14, (RISCV_ISIZE * 14)(a0)
+  ILOAD  x15, (RISCV_ISIZE * 15)(a0)
+  ILOAD  x16, (RISCV_ISIZE * 16)(a0)
+  ILOAD  x17, (RISCV_ISIZE * 17)(a0)
+  ILOAD  x18, (RISCV_ISIZE * 18)(a0)
+  ILOAD  x19, (RISCV_ISIZE * 19)(a0)
+  ILOAD  x20, (RISCV_ISIZE * 20)(a0)
+  ILOAD  x21, (RISCV_ISIZE * 21)(a0)
+  ILOAD  x22, (RISCV_ISIZE * 22)(a0)
+  ILOAD  x23, (RISCV_ISIZE * 23)(a0)
+  ILOAD  x24, (RISCV_ISIZE * 24)(a0)
+  ILOAD  x25, (RISCV_ISIZE * 25)(a0)
+  ILOAD  x26, (RISCV_ISIZE * 26)(a0)
+  ILOAD  x27, (RISCV_ISIZE * 27)(a0)
+  ILOAD  x28, (RISCV_ISIZE * 28)(a0)
+  ILOAD  x29, (RISCV_ISIZE * 29)(a0)
+  ILOAD  x30, (RISCV_ISIZE * 30)(a0)
+  ILOAD  x31, (RISCV_ISIZE * 31)(a0)
+  ILOAD  x10, (RISCV_ISIZE * 10)(a0)   // restore a0
 
   ret                       // jump to ra
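The conditional vector restores in the PowerPC paths above test one VRSAVE bit per vector register before touching it. PowerPC numbers VRSAVE bits from the most significant end, so v<n> maps to bit (31 - n); the test is split between `andis.` (upper halfword, v0–v15) and `andi.` (lower halfword, v16–v31) because each instruction takes only a 16-bit immediate, which is why the masks read `1 PPC_LEFT_SHIFT(15-n)` and `1 PPC_LEFT_SHIFT(31-n)`. A small sketch of that mask arithmetic (plain C++, made-up helper names):

```cpp
#include <cassert>
#include <cstdint>

// For vector register v<n>, VRSAVE bit (31 - n) is set when the register
// is live (bit 0 is the MSB in PowerPC numbering).
uint32_t vrsaveMask(int n) {
  assert(n >= 0 && n < 32);
  return 1u << (31 - n);
}

bool testUpperHalf(uint32_t vrsave, int n) { // n in [0, 16): andis.-style
  uint16_t imm = uint16_t(1u << (15 - n));
  return ((vrsave >> 16) & imm) != 0;
}

bool testLowerHalf(uint32_t vrsave, int n) { // n in [16, 32): andi.-style
  uint16_t imm = uint16_t(1u << (31 - n));
  return (vrsave & imm) != 0;
}

int main() {
  uint32_t vrsave = vrsaveMask(0) | vrsaveMask(20); // v0 and v20 live
  assert(testUpperHalf(vrsave, 0) && testLowerHalf(vrsave, 20));
  assert(!testUpperHalf(vrsave, 1));
  return 0;
}
```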
diff --git a/lib/libunwind/src/UnwindRegistersSave.S b/lib/libunwind/src/UnwindRegistersSave.S
index 94fc836545..f66dc532c2 100644
--- a/lib/libunwind/src/UnwindRegistersSave.S
+++ b/lib/libunwind/src/UnwindRegistersSave.S
@@ -335,12 +335,12 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
 
 // store register (GPR)
 #define PPC64_STR(n) \
-  std   %r##n, (8 * (n + 2))(%r3)
+  std   n, (8 * (n + 2))(3)
 
 // save GPRs
   PPC64_STR(0)
-  mflr  %r0
-  std   %r0, PPC64_OFFS_SRR0(%r3) // store lr as ssr0
+  mflr  0
+  std   0, PPC64_OFFS_SRR0(3) // store lr as ssr0
   PPC64_STR(1)
   PPC64_STR(2)
   PPC64_STR(3)
@@ -373,28 +373,28 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
   PPC64_STR(30)
   PPC64_STR(31)
 
-  mfcr  %r0
-  std   %r0, PPC64_OFFS_CR(%r3)
-  mfxer %r0
-  std   %r0, PPC64_OFFS_XER(%r3)
-  mflr  %r0
-  std   %r0, PPC64_OFFS_LR(%r3)
-  mfctr %r0
-  std   %r0, PPC64_OFFS_CTR(%r3)
-  mfvrsave    %r0
-  std   %r0, PPC64_OFFS_VRSAVE(%r3)
+  mfcr  0
+  std   0, PPC64_OFFS_CR(3)
+  mfxer 0
+  std   0, PPC64_OFFS_XER(3)
+  mflr  0
+  std   0, PPC64_OFFS_LR(3)
+  mfctr 0
+  std   0, PPC64_OFFS_CTR(3)
+  mfvrsave    0
+  std   0, PPC64_OFFS_VRSAVE(3)
 
 #if defined(__VSX__)
   // save VS registers
   // (note that this also saves floating point registers and V registers,
   // because part of VS is mapped to these registers)
 
-  addi  %r4, %r3, PPC64_OFFS_FP
+  addi  4, 3, PPC64_OFFS_FP
 
 // store VS register
-#define PPC64_STVS(n)      \
-  stxvd2x %vs##n, 0, %r4  ;\
-  addi    %r4, %r4, 16
+#define PPC64_STVS(n)      \
+  stxvd2x n, 0, 4         ;\
+  addi    4, 4, 16
 
   PPC64_STVS(0)
   PPC64_STVS(1)
@@ -465,7 +465,7 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
 
 // store FP register
 #define PPC64_STF(n) \
-  stfd  %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
+  stfd  n, (PPC64_OFFS_FP + n * 16)(3)
 
 // save float registers
   PPC64_STF(0)
@@ -507,14 +507,14 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
   // Use 16-bytes below the stack pointer as an
   // aligned buffer to save each vector register.
   // Note that the stack pointer is always 16-byte aligned.
-  subi  %r4, %r1, 16
+  subi  4, 1, 16
 
-#define PPC64_STV_UNALIGNED(n)             \
-  stvx  %v##n, 0, %r4                     ;\
-  ld    %r5, 0(%r4)                       ;\
-  std   %r5, (PPC64_OFFS_V + n * 16)(%r3) ;\
-  ld    %r5, 8(%r4)                       ;\
-  std   %r5, (PPC64_OFFS_V + n * 16 + 8)(%r3)
+#define PPC64_STV_UNALIGNED(n)             \
+  stvx  n, 0, 4                           ;\
+  ld    5, 0(4)                           ;\
+  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
+  ld    5, 8(4)                           ;\
+  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)
 
   PPC64_STV_UNALIGNED(0)
   PPC64_STV_UNALIGNED(1)
@@ -552,7 +552,7 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
 #endif
 #endif
 
-  li    %r3,  0   // return UNW_ESUCCESS
+  li    3,  0   // return UNW_ESUCCESS
   blr
 
@@ -565,140 +565,140 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
 //  thread_state pointer is in r3
 //
 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
-  stw     %r0,   8(%r3)
-  mflr    %r0
-  stw     %r0,   0(%r3) // store lr as ssr0
-  stw     %r1,  12(%r3)
-  stw     %r2,  16(%r3)
-  stw     %r3,  20(%r3)
-  stw     %r4,  24(%r3)
-  stw     %r5,  28(%r3)
-  stw     %r6,  32(%r3)
-  stw     %r7,  36(%r3)
-  stw     %r8,  40(%r3)
-  stw     %r9,  44(%r3)
-  stw     %r10, 48(%r3)
-  stw     %r11, 52(%r3)
-  stw     %r12, 56(%r3)
-  stw     %r13, 60(%r3)
-  stw     %r14, 64(%r3)
-  stw     %r15, 68(%r3)
-  stw     %r16, 72(%r3)
-  stw     %r17, 76(%r3)
-  stw     %r18, 80(%r3)
-  stw     %r19, 84(%r3)
-  stw     %r20, 88(%r3)
-  stw     %r21, 92(%r3)
-  stw     %r22, 96(%r3)
-  stw     %r23,100(%r3)
-  stw     %r24,104(%r3)
-  stw     %r25,108(%r3)
-  stw     %r26,112(%r3)
-  stw     %r27,116(%r3)
-  stw     %r28,120(%r3)
-  stw     %r29,124(%r3)
-  stw     %r30,128(%r3)
-  stw     %r31,132(%r3)
+  stw     0,   8(3)
+  mflr    0
+  stw     0,   0(3) // store lr as ssr0
+  stw     1,  12(3)
+  stw     2,  16(3)
+  stw     3,  20(3)
+  stw     4,  24(3)
+  stw     5,  28(3)
+  stw     6,  32(3)
+  stw     7,  36(3)
+  stw     8,  40(3)
+  stw     9,  44(3)
+  stw     10, 48(3)
+  stw     11, 52(3)
+  stw     12, 56(3)
+  stw     13, 60(3)
+  stw     14, 64(3)
+  stw     15, 68(3)
+  stw     16, 72(3)
+  stw     17, 76(3)
+  stw     18, 80(3)
+  stw     19, 84(3)
+  stw     20, 88(3)
+  stw     21, 92(3)
+  stw     22, 96(3)
+  stw     23,100(3)
+  stw     24,104(3)
+  stw     25,108(3)
+  stw     26,112(3)
+  stw     27,116(3)
+  stw     28,120(3)
+  stw     29,124(3)
+  stw     30,128(3)
+  stw     31,132(3)
 
   // save VRSave register
-  mfspr   %r0, 256
-  stw     %r0, 156(%r3)
+  mfspr   0, 256
+  stw     0, 156(3)
   // save CR registers
-  mfcr    %r0
-  stw     %r0, 136(%r3)
+  mfcr    0
+  stw     0, 136(3)
   // save CTR register
-  mfctr   %r0
-  stw     %r0, 148(%r3)
+  mfctr   0
+  stw     0, 148(3)
 
 #if !defined(__NO_FPRS__)
   // save float registers
-  stfd    %f0, 160(%r3)
-  stfd    %f1, 168(%r3)
-  stfd    %f2, 176(%r3)
-  stfd    %f3, 184(%r3)
-  stfd    %f4, 192(%r3)
-  stfd    %f5, 200(%r3)
-  stfd    %f6, 208(%r3)
-  stfd    %f7, 216(%r3)
-  stfd    %f8, 224(%r3)
-  stfd    %f9, 232(%r3)
-  stfd    %f10,240(%r3)
-  stfd    %f11,248(%r3)
-  stfd    %f12,256(%r3)
-  stfd    %f13,264(%r3)
-  stfd    %f14,272(%r3)
-  stfd    %f15,280(%r3)
-  stfd    %f16,288(%r3)
-  stfd    %f17,296(%r3)
-  stfd    %f18,304(%r3)
-  stfd    %f19,312(%r3)
-  stfd    %f20,320(%r3)
-  stfd    %f21,328(%r3)
-  stfd    %f22,336(%r3)
-  stfd    %f23,344(%r3)
-  stfd    %f24,352(%r3)
-  stfd    %f25,360(%r3)
-  stfd    %f26,368(%r3)
-  stfd    %f27,376(%r3)
-  stfd    %f28,384(%r3)
-  stfd    %f29,392(%r3)
-  stfd    %f30,400(%r3)
-  stfd    %f31,408(%r3)
+  stfd    0, 160(3)
+  stfd    1, 168(3)
+  stfd    2, 176(3)
+  stfd    3, 184(3)
+  stfd    4, 192(3)
+  stfd    5, 200(3)
+  stfd    6, 208(3)
+  stfd    7, 216(3)
+  stfd    8, 224(3)
+  stfd    9, 232(3)
+  stfd    10,240(3)
+  stfd    11,248(3)
+  stfd    12,256(3)
+  stfd    13,264(3)
+  stfd    14,272(3)
+  stfd    15,280(3)
+  stfd    16,288(3)
+  stfd    17,296(3)
+  stfd    18,304(3)
+  stfd    19,312(3)
+  stfd    20,320(3)
+  stfd    21,328(3)
+  stfd    22,336(3)
+  stfd    23,344(3)
+  stfd    24,352(3)
+  stfd    25,360(3)
+  stfd    26,368(3)
+  stfd    27,376(3)
+  stfd    28,384(3)
+  stfd    29,392(3)
+  stfd    30,400(3)
+  stfd    31,408(3)
 #endif
 
 #if defined(__ALTIVEC__)
   // save vector registers
 
-  subi  %r4, %r1, 16
-  rlwinm  %r4, %r4, 0, 0, 27  // mask low 4-bits
+  subi  4, 1, 16
+  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
   // r4 is now a 16-byte aligned pointer into the red zone
 
 #define SAVE_VECTOR_UNALIGNED(_vec, _offset)  \
-  stvx  _vec, 0, %r4           SEPARATOR \
-  lwz   %r5, 0(%r4)            SEPARATOR \
-  stw   %r5, _offset(%r3)      SEPARATOR \
-  lwz   %r5, 4(%r4)            SEPARATOR \
-  stw   %r5, _offset+4(%r3)    SEPARATOR \
-  lwz   %r5, 8(%r4)            SEPARATOR \
-  stw   %r5, _offset+8(%r3)    SEPARATOR \
-  lwz   %r5, 12(%r4)           SEPARATOR \
-  stw   %r5, _offset+12(%r3)
-
-  SAVE_VECTOR_UNALIGNED( %v0, 424+0x000)
-  SAVE_VECTOR_UNALIGNED( %v1, 424+0x010)
-  SAVE_VECTOR_UNALIGNED( %v2, 424+0x020)
-  SAVE_VECTOR_UNALIGNED( %v3, 424+0x030)
-  SAVE_VECTOR_UNALIGNED( %v4, 424+0x040)
-  SAVE_VECTOR_UNALIGNED( %v5, 424+0x050)
-  SAVE_VECTOR_UNALIGNED( %v6, 424+0x060)
-  SAVE_VECTOR_UNALIGNED( %v7, 424+0x070)
-  SAVE_VECTOR_UNALIGNED( %v8, 424+0x080)
-  SAVE_VECTOR_UNALIGNED( %v9, 424+0x090)
-  SAVE_VECTOR_UNALIGNED(%v10, 424+0x0A0)
-  SAVE_VECTOR_UNALIGNED(%v11, 424+0x0B0)
-  SAVE_VECTOR_UNALIGNED(%v12, 424+0x0C0)
-  SAVE_VECTOR_UNALIGNED(%v13, 424+0x0D0)
-  SAVE_VECTOR_UNALIGNED(%v14, 424+0x0E0)
-  SAVE_VECTOR_UNALIGNED(%v15, 424+0x0F0)
-  SAVE_VECTOR_UNALIGNED(%v16, 424+0x100)
-  SAVE_VECTOR_UNALIGNED(%v17, 424+0x110)
-  SAVE_VECTOR_UNALIGNED(%v18, 424+0x120)
-  SAVE_VECTOR_UNALIGNED(%v19, 424+0x130)
-  SAVE_VECTOR_UNALIGNED(%v20, 424+0x140)
-  SAVE_VECTOR_UNALIGNED(%v21, 424+0x150)
-  SAVE_VECTOR_UNALIGNED(%v22, 424+0x160)
-  SAVE_VECTOR_UNALIGNED(%v23, 424+0x170)
-  SAVE_VECTOR_UNALIGNED(%v24, 424+0x180)
-  SAVE_VECTOR_UNALIGNED(%v25, 424+0x190)
-  SAVE_VECTOR_UNALIGNED(%v26, 424+0x1A0)
-  SAVE_VECTOR_UNALIGNED(%v27, 424+0x1B0)
-  SAVE_VECTOR_UNALIGNED(%v28, 424+0x1C0)
-  SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
-  SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
-  SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)
+  stvx  _vec, 0, 4             SEPARATOR \
+  lwz   5, 0(4)                SEPARATOR \
+  stw   5, _offset(3)          SEPARATOR \
+  lwz   5, 4(4)                SEPARATOR \
+  stw   5, _offset+4(3)        SEPARATOR \
+  lwz   5, 8(4)                SEPARATOR \
+  stw   5, _offset+8(3)        SEPARATOR \
+  lwz   5, 12(4)               SEPARATOR \
+  stw   5, _offset+12(3)
+
+  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
+  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
+  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
+  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
+  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
+  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
+  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
+  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
+  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
+  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
+  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
+  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
+  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
+  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
+  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
+  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
+  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
+  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
+  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
+  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
+  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
+  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
+  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
+  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
+  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
+  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
+  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
+  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
+  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
+  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
+  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
+  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
 #endif
 
-  li    %r3, 0  // return UNW_ESUCCESS
+  li    3, 0  // return UNW_ESUCCESS
   blr
 
@@ -1026,7 +1026,7 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
   jmp %o7
    clr %o0                   // return UNW_ESUCCESS
 
-#elif defined(__riscv) && __riscv_xlen == 64
+#elif defined(__riscv)
 
 #
 # extern int __unw_getcontext(unw_context_t* thread_state)
@@ -1035,73 +1035,73 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
 #  thread_state pointer is in a0
 #
 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
-  sd    x1, (8 * 0)(a0) // store ra as pc
-  sd    x1, (8 * 1)(a0)
-  sd    x2, (8 * 2)(a0)
-  sd    x3, (8 * 3)(a0)
-  sd    x4, (8 * 4)(a0)
-  sd    x5, (8 * 5)(a0)
-  sd    x6, (8 * 6)(a0)
-  sd    x7, (8 * 7)(a0)
-  sd    x8, (8 * 8)(a0)
-  sd    x9, (8 * 9)(a0)
-  sd    x10, (8 * 10)(a0)
-  sd    x11, (8 * 11)(a0)
-  sd    x12, (8 * 12)(a0)
-  sd    x13, (8 * 13)(a0)
-  sd    x14, (8 * 14)(a0)
-  sd    x15, (8 * 15)(a0)
-  sd    x16, (8 * 16)(a0)
-  sd    x17, (8 * 17)(a0)
-  sd    x18, (8 * 18)(a0)
-  sd    x19, (8 * 19)(a0)
-  sd    x20, (8 * 20)(a0)
-  sd    x21, (8 * 21)(a0)
-  sd    x22, (8 * 22)(a0)
-  sd    x23, (8 * 23)(a0)
-  sd    x24, (8 * 24)(a0)
-  sd    x25, (8 * 25)(a0)
-  sd    x26, (8 * 26)(a0)
-  sd    x27, (8 * 27)(a0)
-  sd    x28, (8 * 28)(a0)
-  sd    x29, (8 * 29)(a0)
-  sd    x30, (8 * 30)(a0)
-  sd    x31, (8 * 31)(a0)
-
-#if defined(__riscv_flen) && __riscv_flen == 64
-  fsd    f0, (8 * 32 + 8 * 0)(a0)
-  fsd    f1, (8 * 32 + 8 * 1)(a0)
-  fsd    f2, (8 * 32 + 8 * 2)(a0)
-  fsd    f3, (8 * 32 + 8 * 3)(a0)
-  fsd    f4, (8 * 32 + 8 * 4)(a0)
-  fsd    f5, (8 * 32 + 8 * 5)(a0)
-  fsd    f6, (8 * 32 + 8 * 6)(a0)
-  fsd    f7, (8 * 32 + 8 * 7)(a0)
-  fsd    f8, (8 * 32 + 8 * 8)(a0)
-  fsd    f9, (8 * 32 + 8 * 9)(a0)
-  fsd    f10, (8 * 32 + 8 * 10)(a0)
-  fsd    f11, (8 * 32 + 8 * 11)(a0)
-  fsd    f12, (8 * 32 + 8 * 12)(a0)
-  fsd    f13, (8 * 32 + 8 * 13)(a0)
-  fsd    f14, (8 * 32 + 8 * 14)(a0)
-  fsd    f15, (8 * 32 + 8 * 15)(a0)
-  fsd    f16, (8 * 32 + 8 * 16)(a0)
-  fsd    f17, (8 * 32 + 8 * 17)(a0)
-  fsd    f18, (8 * 32 + 8 * 18)(a0)
-  fsd    f19, (8 * 32 + 8 * 19)(a0)
-  fsd    f20, (8 * 32 + 8 * 20)(a0)
-  fsd    f21, (8 * 32 + 8 * 21)(a0)
-  fsd    f22, (8 * 32 + 8 * 22)(a0)
-  fsd    f23, (8 * 32 + 8 * 23)(a0)
-  fsd    f24, (8 * 32 + 8 * 24)(a0)
-  fsd    f25, (8 * 32 + 8 * 25)(a0)
-  fsd    f26, (8 * 32 + 8 * 26)(a0)
-  fsd    f27, (8 * 32 + 8 * 27)(a0)
-  fsd    f28, (8 * 32 + 8 * 28)(a0)
-  fsd    f29, (8 * 32 + 8 * 29)(a0)
-  fsd    f30, (8 * 32 + 8 * 30)(a0)
-  fsd    f31, (8 * 32 + 8 * 31)(a0)
-#endif
+  ISTORE x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
+  ISTORE x1, (RISCV_ISIZE * 1)(a0)
+  ISTORE x2, (RISCV_ISIZE * 2)(a0)
+  ISTORE x3, (RISCV_ISIZE * 3)(a0)
+  ISTORE x4, (RISCV_ISIZE * 4)(a0)
+  ISTORE x5, (RISCV_ISIZE * 5)(a0)
+  ISTORE x6, (RISCV_ISIZE * 6)(a0)
+  ISTORE x7, (RISCV_ISIZE * 7)(a0)
+  ISTORE x8, (RISCV_ISIZE * 8)(a0)
+  ISTORE x9, (RISCV_ISIZE * 9)(a0)
+  ISTORE x10, (RISCV_ISIZE * 10)(a0)
+  ISTORE x11, (RISCV_ISIZE * 11)(a0)
+  ISTORE x12, (RISCV_ISIZE * 12)(a0)
+  ISTORE x13, (RISCV_ISIZE * 13)(a0)
+  ISTORE x14, (RISCV_ISIZE * 14)(a0)
+  ISTORE x15, (RISCV_ISIZE * 15)(a0)
+  ISTORE x16, (RISCV_ISIZE * 16)(a0)
+  ISTORE x17, (RISCV_ISIZE * 17)(a0)
+  ISTORE x18, (RISCV_ISIZE * 18)(a0)
+  ISTORE x19, (RISCV_ISIZE * 19)(a0)
+  ISTORE x20, (RISCV_ISIZE * 20)(a0)
+  ISTORE x21, (RISCV_ISIZE * 21)(a0)
+  ISTORE x22, (RISCV_ISIZE * 22)(a0)
+  ISTORE x23, (RISCV_ISIZE * 23)(a0)
+  ISTORE x24, (RISCV_ISIZE * 24)(a0)
+  ISTORE x25, (RISCV_ISIZE * 25)(a0)
+  ISTORE x26, (RISCV_ISIZE * 26)(a0)
+  ISTORE x27, (RISCV_ISIZE * 27)(a0)
+  ISTORE x28, (RISCV_ISIZE * 28)(a0)
+  ISTORE x29, (RISCV_ISIZE * 29)(a0)
+  ISTORE x30, (RISCV_ISIZE * 30)(a0)
+  ISTORE x31, (RISCV_ISIZE * 31)(a0)
+
+# if defined(__riscv_flen)
+  FSTORE f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
+  FSTORE f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
+  FSTORE f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
+  FSTORE f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
+  FSTORE f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
+  FSTORE f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
+  FSTORE f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
+  FSTORE f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
+  FSTORE f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
+  FSTORE f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
+  FSTORE f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
+  FSTORE f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
+  FSTORE f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
+  FSTORE f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
+  FSTORE f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
+  FSTORE f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
+  FSTORE f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
+  FSTORE f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
+  FSTORE f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
+  FSTORE f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
+  FSTORE f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
+  FSTORE f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
+  FSTORE f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
+  FSTORE f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
+  FSTORE f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
+  FSTORE f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
+  FSTORE f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
+  FSTORE f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
+  FSTORE f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
+  FSTORE f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
+  FSTORE f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
+  FSTORE f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
+# endif
 
   li     a0, 0  // return UNW_ESUCCESS
   ret           // jump to ra
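`__unw_getcontext` is the assembly entry point behind the public `unw_getcontext`: it snapshots the caller's register file into an `unw_context_t` laid out exactly as the stores above describe. A typical caller, sketched against the standard libunwind API (error handling elided):

```cpp
#include <libunwind.h>
#include <cstdio>

// Walk the current thread's stack: capture the context the assembly above
// fills in, then step an unwind cursor frame by frame.
void backtrace_here() {
  unw_context_t context;
  unw_getcontext(&context);        // saves GPRs/FPRs as implemented above

  unw_cursor_t cursor;
  unw_init_local(&cursor, &context);

  while (unw_step(&cursor) > 0) {
    unw_word_t pc = 0;
    unw_get_reg(&cursor, UNW_REG_IP, &pc);
    std::printf("pc=0x%llx\n", (unsigned long long)pc);
  }
}
```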
diff --git a/lib/libunwind/src/assembly.h b/lib/libunwind/src/assembly.h
index f2f7c84830..76ef825532 100644
--- a/lib/libunwind/src/assembly.h
+++ b/lib/libunwind/src/assembly.h
@@ -27,6 +27,35 @@
 #define PPC64_OFFS_V      824
 #elif defined(__APPLE__) && defined(__aarch64__)
 #define SEPARATOR %%
+#elif defined(__riscv)
+# define RISCV_ISIZE (__riscv_xlen / 8)
+# define RISCV_FOFFSET (RISCV_ISIZE * 32)
+# if defined(__riscv_flen)
+# define RISCV_FSIZE (__riscv_flen / 8)
+# endif
+
+# if __riscv_xlen == 64
+# define ILOAD ld
+# define ISTORE sd
+# elif __riscv_xlen == 32
+# define ILOAD lw
+# define ISTORE sw
+# else
+# error "Unsupported __riscv_xlen"
+# endif
+
+# if defined(__riscv_flen)
+# if __riscv_flen == 64
+# define FLOAD fld
+# define FSTORE fsd
+# elif __riscv_flen == 32
+# define FLOAD flw
+# define FSTORE fsw
+# else
+# error "Unsupported __riscv_flen"
+# endif
+# endif
+# define SEPARATOR ;
 #else
 #define SEPARATOR ;
 #endif
@@ -70,12 +99,15 @@
 
 #if defined(__APPLE__)
 
 #define SYMBOL_IS_FUNC(name)
-#define EXPORT_SYMBOL(name)
 #define HIDDEN_SYMBOL(name) .private_extern name
-#define WEAK_SYMBOL(name) .weak_reference name
+#if defined(_LIBUNWIND_HIDE_SYMBOLS)
+#define EXPORT_SYMBOL(name) HIDDEN_SYMBOL(name)
+#else
+#define EXPORT_SYMBOL(name)
+#endif
 #define WEAK_ALIAS(name, aliasname)                                           \
   .globl SYMBOL_NAME(aliasname) SEPARATOR                                     \
-  WEAK_SYMBOL(aliasname) SEPARATOR                                            \
+  EXPORT_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR                             \
   SYMBOL_NAME(aliasname) = SYMBOL_NAME(name)
 
 #define NO_EXEC_STACK_DIRECTIVE
@@ -87,17 +119,23 @@
 #else
 #define SYMBOL_IS_FUNC(name) .type name,@function
 #endif
-#define EXPORT_SYMBOL(name)
 #define HIDDEN_SYMBOL(name) .hidden name
+#if defined(_LIBUNWIND_HIDE_SYMBOLS)
+#define EXPORT_SYMBOL(name) HIDDEN_SYMBOL(name)
+#else
+#define EXPORT_SYMBOL(name)
+#endif
 #define WEAK_SYMBOL(name) .weak name
 
 #if defined(__hexagon__)
-#define WEAK_ALIAS(name, aliasname)                                           \
-  WEAK_SYMBOL(aliasname) SEPARATOR                                            \
+#define WEAK_ALIAS(name, aliasname)                                           \
+  EXPORT_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR                             \
+  WEAK_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR                               \
   .equiv SYMBOL_NAME(aliasname), SYMBOL_NAME(name)
 #else
 #define WEAK_ALIAS(name, aliasname)                                           \
-  WEAK_SYMBOL(aliasname) SEPARATOR                                            \
+  EXPORT_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR                             \
+  WEAK_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR                               \
   SYMBOL_NAME(aliasname) = SYMBOL_NAME(name)
 #endif
@@ -119,7 +157,7 @@
     .section .drectve,"yn" SEPARATOR                                          \
     .ascii "-export:", #name, "\0" SEPARATOR                                  \
     .text
-#if defined(_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS)
+#if defined(_LIBUNWIND_HIDE_SYMBOLS)
 #define EXPORT_SYMBOL(name)
 #else
 #define EXPORT_SYMBOL(name) EXPORT_SYMBOL2(name)
@@ -178,4 +216,8 @@
 #endif
 #endif /* __arm__ */
 
+#if defined(__ppc__) || defined(__powerpc64__)
+#define PPC_LEFT_SHIFT(index) << (index)
+#endif
+
 #endif /* UNWIND_ASSEMBLY_H */
diff --git a/lib/libunwind/src/config.h b/lib/libunwind/src/config.h
index 9efed05405..2ab9d2f5e0 100644
--- a/lib/libunwind/src/config.h
+++ b/lib/libunwind/src/config.h
@@ -52,7 +52,8 @@
   #endif
 #endif
 
-#if defined(_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS)
+#if defined(_LIBUNWIND_HIDE_SYMBOLS)
+  // The CMake file passes -fvisibility=hidden to control ELF/Mach-O visibility.
   #define _LIBUNWIND_EXPORT
   #define _LIBUNWIND_HIDDEN
 #else
@@ -70,11 +71,15 @@
 #define SYMBOL_NAME(name) XSTR(__USER_LABEL_PREFIX__) #name
 
 #if defined(__APPLE__)
+#if defined(_LIBUNWIND_HIDE_SYMBOLS)
+#define _LIBUNWIND_ALIAS_VISIBILITY(name) __asm__(".private_extern " name);
+#else
+#define _LIBUNWIND_ALIAS_VISIBILITY(name)
+#endif
 #define _LIBUNWIND_WEAK_ALIAS(name, aliasname)                                 \
   __asm__(".globl " SYMBOL_NAME(aliasname));                                   \
   __asm__(SYMBOL_NAME(aliasname) " = " SYMBOL_NAME(name));                     \
-  extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname                        \
-      __attribute__((weak_import));
+  _LIBUNWIND_ALIAS_VISIBILITY(SYMBOL_NAME(aliasname))
 #elif defined(__ELF__)
 #define _LIBUNWIND_WEAK_ALIAS(name, aliasname)                                 \
   extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname                        \
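On Apple platforms the alias is now emitted purely through inline assembly, with visibility applied via `_LIBUNWIND_ALIAS_VISIBILITY`, while the ELF branch keeps the attribute-based form. A standalone illustration of the ELF pattern this relies on (made-up symbol names; GCC/Clang extension):

```cpp
// impl() is the hidden worker; alias_fn becomes a weak alias for it, so the
// dynamic linker resolves both names to the same code, and a stronger
// definition of alias_fn elsewhere can still win.
extern "C" int impl(void) { return 0; }
extern "C" int alias_fn(void) __attribute__((weak, alias("impl")));
```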
diff --git a/lib/libunwind/src/libunwind.cpp b/lib/libunwind/src/libunwind.cpp
index c21461b1f4..1faf000ce4 100644
--- a/lib/libunwind/src/libunwind.cpp
+++ b/lib/libunwind/src/libunwind.cpp
@@ -16,6 +16,15 @@
 
 #include <stdlib.h>
 
+// Define the __has_feature extension for compilers that do not support it so
+// that we can later check for the presence of ASan in a compiler-neutral way.
+#if !defined(__has_feature)
+#define __has_feature(feature) 0
+#endif
+
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+#include <sanitizer/asan_interface.h>
+#endif
 
 #if !defined(__USING_SJLJ_EXCEPTIONS__)
 #include "AddressSpace.hpp"
@@ -60,7 +69,7 @@ _LIBUNWIND_HIDDEN int __unw_init_local(unw_cursor_t *cursor,
 #  warning The MIPS architecture is not supported with this ABI and environment!
 #elif defined(__sparc__)
 #  define REGISTER_KIND Registers_sparc
-#elif defined(__riscv) && __riscv_xlen == 64
+#elif defined(__riscv)
 #  define REGISTER_KIND Registers_riscv
 #elif defined(__ve__)
 #  define REGISTER_KIND Registers_ve
@@ -184,6 +193,10 @@ _LIBUNWIND_WEAK_ALIAS(__unw_get_proc_info, unw_get_proc_info)
 /// Resume execution at cursor position (aka longjump).
 _LIBUNWIND_HIDDEN int __unw_resume(unw_cursor_t *cursor) {
   _LIBUNWIND_TRACE_API("__unw_resume(cursor=%p)", static_cast<void *>(cursor));
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+  // Inform the ASan runtime that now might be a good time to clean stuff up.
+  __asan_handle_no_return();
+#endif
   AbstractUnwindCursor *co = (AbstractUnwindCursor *)cursor;
   co->jumpto();
   return UNW_EUNSPEC;
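The same compiler-neutral detection pattern is reusable in any C++ code that abandons the current stack the way `__unw_resume` does (longjmp-style transfer, coroutine switch): Clang reports ASan via `__has_feature(address_sanitizer)`, GCC via `__SANITIZE_ADDRESS__`, and `__asan_handle_no_return` is the documented hook in `<sanitizer/asan_interface.h>`. A minimal, self-contained sketch:

```cpp
#include <cstdlib>

#if !defined(__has_feature)
#define __has_feature(feature) 0 // non-Clang compilers lack this extension
#endif

#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
#include <sanitizer/asan_interface.h>
#endif

[[noreturn]] void abandon_stack_and_go() {
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
  // Drop ASan's fake-stack bookkeeping for the frames being abandoned,
  // preventing false positives after the non-returning control transfer.
  __asan_handle_no_return();
#endif
  std::exit(0); // stand-in for the real jump (e.g., cursor jumpto)
}

int main() { abandon_stack_and_go(); }
```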
