Checking patch sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S...
error: while searching for:
 *
 */

/* Offsets for data table __svml_stan_data_internal */
#define _sInvPI_uisa			0
#define _sPI1_uisa			16
#define _sPI2_uisa			32
#define _sPI3_uisa			48
#define _sPI2_ha_uisa			64
#define _sPI3_ha_uisa			80
#define Th_tbl_uisa			96
#define Tl_tbl_uisa			224
#define _sPC3_uisa			352
#define _sPC5_uisa			368
#define _sRangeReductionVal_uisa	384
#define _sInvPi				400
#define _sSignMask			416
#define _sAbsMask			432
#define _sRangeVal			448
#define _sRShifter			464
#define _sOne				480
#define _sRangeReductionVal		496
#define _sPI1				512
#define _sPI2				528
#define _sPI3				544
#define _sPI4				560
#define _sPI1_FMA			576
#define _sPI2_FMA			592
#define _sPI3_FMA			608
#define _sP0				624
#define _sP1				640
#define _sQ0				656
#define _sQ1				672
#define _sQ2				688
#define _sTwo				704
#define _sCoeffs			720

#include <sysdep.h>

	.section .text.sse4, "ax", @progbits
ENTRY(_ZGVbN4v_tanf_sse4)
	subq	$232, %rsp
	cfi_def_cfa_offset(240)
	movaps	%xmm0, %xmm13
	movups	_sAbsMask+__svml_stan_data_internal(%rip), %xmm12

	/*
	 * Legacy Code
	 * Here HW FMA can be unavailable
	 */
	xorl	%eax, %eax
	movaps	%xmm12, %xmm4
	pxor	%xmm10, %xmm10
	movups	_sInvPi+__svml_stan_data_internal(%rip), %xmm2
	andps	%xmm13, %xmm4
	mulps	%xmm4, %xmm2

	/* Range reduction */
	movaps	%xmm4, %xmm1

	/*
	 *
	 * Main path (_LA_ and _EP_)
	 *
	 * Octant calculation
	 */
	movups	_sRShifter+__svml_stan_data_internal(%rip), %xmm3

	/* Large values check */
	movaps	%xmm4, %xmm11
	movups	_sPI1+__svml_stan_data_internal(%rip), %xmm5
	andnps	%xmm13, %xmm12
	movups	_sPI2+__svml_stan_data_internal(%rip), %xmm6
	addps	%xmm3, %xmm2
	cmpnleps _sRangeReductionVal+__svml_stan_data_internal(%rip), %xmm11
	movaps	%xmm2, %xmm8
	movups	_sPI3+__svml_stan_data_internal(%rip), %xmm7
	subps	%xmm3, %xmm8
	movmskps %xmm11, %edx
	movups	_sPI4+__svml_stan_data_internal(%rip), %xmm9
	mulps	%xmm8, %xmm5
	mulps	%xmm8, %xmm6
	mulps	%xmm8, %xmm7
	subps	%xmm5, %xmm1
	mulps	%xmm8, %xmm9
	subps	%xmm6, %xmm1
	movups	_sQ2+__svml_stan_data_internal(%rip), %xmm15

	/* Inversion mask and sign calculation */
	movaps	%xmm2, %xmm5

	/* Rational approximation */
	movups	_sP1+__svml_stan_data_internal(%rip), %xmm14
	pslld	$30, %xmm2
	cmpneqps %xmm10, %xmm2
	subps	%xmm7, %xmm1

	/* Exchanged numerator and denominator if necessary */
	movaps	%xmm2, %xmm0
	movaps	%xmm2, %xmm10
	pslld	$31, %xmm5
	subps	%xmm9, %xmm1
	movaps	%xmm1, %xmm3
	pxor	%xmm12, %xmm5
	mulps	%xmm1, %xmm3
	mulps	%xmm3, %xmm15
	mulps	%xmm3, %xmm14
	addps	_sQ1+__svml_stan_data_internal(%rip), %xmm15
	addps	_sP0+__svml_stan_data_internal(%rip), %xmm14
	mulps	%xmm15, %xmm3
	mulps	%xmm14, %xmm1
	addps	_sQ0+__svml_stan_data_internal(%rip), %xmm3
	andnps	%xmm1, %xmm0
	andps	%xmm3, %xmm10
	andps	%xmm2, %xmm1
	andnps	%xmm3, %xmm2
	orps	%xmm10, %xmm0
	orps	%xmm2, %xmm1

	/* Division */
	divps	%xmm1, %xmm0

	/* Sign setting */
	pxor	%xmm5, %xmm0

	/*
	 *
	 * End of main path (_LA_ and _EP_)
	 */
	testl	%edx, %edx

	/* Go to auxilary branch */
	jne	L(AUX_BRANCH)
	# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm4 xmm11 xmm12 xmm13

	/* Return from auxilary branch
	 * for out of main path inputs
	 */

L(AUX_BRANCH_RETURN):
	testl	%eax, %eax

	/* Go to special inputs processing branch */
	jne	L(SPECIAL_VALUES_BRANCH)
	# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm13

	/* Restore registers
	 * and exit the function
	 */

L(EXIT):
	addq	$232, %rsp
	cfi_def_cfa_offset(8)
	ret
	cfi_def_cfa_offset(240)

	/* Branch to process
	 * special inputs
	 */

L(SPECIAL_VALUES_BRANCH):
	movups	%xmm13, 32(%rsp)
	movups	%xmm0, 48(%rsp)
	# LOE rbx rbp r12 r13 r14 r15 eax xmm0

	xorl	%edx, %edx
	movq	%r12, 16(%rsp)
	cfi_offset(12, -224)
	movl	%edx, %r12d
	movq	%r13, 8(%rsp)
	cfi_offset(13, -232)
	movl	%eax, %r13d
	movq	%r14, (%rsp)
	cfi_offset(14, -240)
	# LOE rbx rbp r15 r12d r13d

	/* Range mask
	 * bits check
	 */

L(RANGEMASK_CHECK):
	btl	%r12d, %r13d

	/* Call scalar math function */
	jc	L(SCALAR_MATH_CALL)
	# LOE rbx rbp r15 r12d r13d

	/* Special inputs
	 * processing loop
	 */

L(SPECIAL_VALUES_LOOP):
	incl	%r12d
	cmpl	$4, %r12d

error: patch failed: sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S:45
error: sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S: patch does not apply
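For anyone re-deriving the failing hunk by hand: the quoted context is the main path of the vectorized tanf. It rounds x/(pi/2) to an index n (the x*_sInvPi + _sRShifter round-to-nearest trick), reduces the argument to r = x - n*pi/2 by subtracting pi/2 in pieces (_sPI1.._sPI4), evaluates a rational approximation r*P(r^2)/Q(r^2) from the _sP0/_sP1 and _sQ0.._sQ2 tables, and, because tan(r + pi/2) = -1/tan(r), exchanges numerator and denominator with a sign flip for odd n (the pslld/cmpneqps mask and the andps/andnps/orps selects ahead of the single divps). Below is a minimal scalar C sketch of that scheme, not the glibc implementation: tanf_sketch is a hypothetical name, the Pade [3/4] coefficients are illustrative stand-ins for the minimax values stored in __svml_stan_data_internal, and a double-precision reduction stands in for the assembly's split-pi subtraction.

/* Scalar sketch of the main-path scheme above.
   Build with: cc -O2 sketch.c -lm  */
#include <math.h>

static float
tanf_sketch (float x)
{
  const double pi2 = 1.5707963267948966;	/* pi/2 */

  /* Reduction index; the assembly derives it from x*_sInvPi + _sRShifter
     and keeps the shifter's low bits for the masks below.  */
  double n = nearbyint ((double) x / pi2);

  /* Reduced argument.  Done in double here for brevity; the vector code
     stays in single precision and subtracts n*pi/2 in four pieces
     (_sPI1.._sPI4) to keep r accurate.  */
  double r = (double) x - n * pi2;
  double r2 = r * r;

  /* Pade [3/4] approximant of tan on [-pi/4, pi/4]; the real code loads
     minimax coefficients from the data table instead.  */
  double p = r * (105.0 - 10.0 * r2);	/* numerator   r*P(r^2) */
  double q = 105.0 + r2 * (-45.0 + r2);	/* denominator Q(r^2)   */

  /* Odd n: tan(r + pi/2) = -1/tan(r), so swap numerator and denominator
     and flip the sign.  The assembly does this branchlessly with mask
     selects before one divps.  */
  if ((long long) n & 1)
    return (float) (-q / p);

  return (float) (p / q);
}

This only models the main path: inputs above _sRangeReductionVal (flagged by the cmpnleps/movmskps pair into %edx) take L(AUX_BRANCH) for large-argument reduction, and the special-values tail at the end of the hunk replays each flagged lane through the scalar math function.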